// pkg/dedup/engine.go
package dedup

import (
	"context"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"

	"github.com/containerd/containerd/v2/core/mount"
	"github.com/onyx/snapshotter/pkg/metadata"
)

// Engine is a content-addressed deduplication engine. It splits files into
// fixed-size chunks, stores each unique chunk once on disk (keyed by its
// SHA-256 digest), and tracks per-chunk reference counts in the metadata store.
type Engine struct {
	chunkDir  string         // root directory for on-disk chunk storage (two-level fan-out by digest prefix)
	chunkSize int64          // fixed chunk size in bytes used when splitting files
	metaDB    metadata.Store // persistent chunk metadata (digests, sizes, refcounts)
	mu        sync.RWMutex   // guards chunkCache

	// In-memory cache: digest -> chunk data. Presence of a digest here means
	// the chunk has already been persisted to chunkDir.
	chunkCache map[string][]byte
}

// NewEngine constructs a deduplication engine rooted at chunkDir, creating the
// directory if necessary. chunkSize is the fixed size used when splitting
// files, and metaDB persists chunk metadata and reference counts.
func NewEngine(chunkDir string, chunkSize int64, metaDB metadata.Store) (*Engine, error) {
	if err := os.MkdirAll(chunkDir, 0o755); err != nil {
		return nil, err
	}

	e := &Engine{
		chunkDir:   chunkDir,
		chunkSize:  chunkSize,
		metaDB:     metaDB,
		chunkCache: map[string][]byte{},
	}
	return e, nil
}

// Deduplicate deduplicates the filesystems behind the given mounts: it walks
// each mount source, splits files into chunks, hashes them, and records the
// chunk mappings. It assumes the mounts are already materialized at
// mount.Source (e.g. bind mounts or pre-mounted temporary paths).
func (e *Engine) Deduplicate(ctx context.Context, mounts []mount.Mount) error {
	// NOTE: the loop variable must not be named "mount" — that would shadow
	// the imported containerd mount package.
	for _, m := range mounts {
		if err := e.deduplicateDir(ctx, m.Source); err != nil {
			return fmt.Errorf("dedup failed for %s: %w", m.Source, err)
		}
	}
	return nil
}

// deduplicateDir recursively deduplicates every regular file under dir.
// It honors context cancellation between files so long walks can be aborted.
func (e *Engine) deduplicateDir(ctx context.Context, dir string) error {
	return filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Stop promptly if the caller cancelled; filepath.Walk itself is
		// not context-aware.
		if err := ctx.Err(); err != nil {
			return err
		}

		if fi.IsDir() {
			return nil
		}

		// Chunk and deduplicate each file.
		return e.deduplicateFile(ctx, path)
	})
}

// deduplicateFile splits a single file into fixed-size chunks, addresses each
// chunk by its SHA-256 digest, and either persists a new chunk (refcount 1) or
// increments the refcount of an already-stored one.
//
// Fixes over the previous version:
//   - the "save new chunk" branch was duplicated verbatim, writing every new
//     chunk to disk twice;
//   - the cache stored a sub-slice of the reused read buffer, so cached chunk
//     data was silently overwritten on the next loop iteration.
func (e *Engine) deduplicateFile(ctx context.Context, filePath string) error {
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", filePath, err)
	}
	defer file.Close()

	buf := make([]byte, e.chunkSize)
	var offset int64

	for {
		// Honor cancellation between chunks.
		if err := ctx.Err(); err != nil {
			return err
		}

		// io.ReadFull keeps chunk boundaries stable: a plain Read may return
		// short mid-file, which would change chunk digests.
		n, err := io.ReadFull(file, buf)
		if err == io.EOF {
			break // file ended exactly on a chunk boundary
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			return fmt.Errorf("failed to read file %s: %w", filePath, err)
		}
		if n == 0 {
			break
		}

		// Copy the chunk out of the reused read buffer: anything stored in
		// the cache must not alias buf.
		chunk := append([]byte(nil), buf[:n]...)
		hash := sha256.Sum256(chunk)
		digest := fmt.Sprintf("%x", hash)

		e.mu.RLock()
		_, exists := e.chunkCache[digest]
		e.mu.RUnlock()

		if !exists {
			// Persist the new chunk under a two-level fan-out directory
			// (first two hex digits of the digest).
			chunkPath := filepath.Join(e.chunkDir, digest[:2], digest[2:])
			if err := os.MkdirAll(filepath.Dir(chunkPath), 0o755); err != nil {
				return fmt.Errorf("failed to create chunk dir: %w", err)
			}

			if err := os.WriteFile(chunkPath, chunk, 0o644); err != nil {
				return fmt.Errorf("failed to write chunk: %w", err)
			}

			e.mu.Lock()
			e.chunkCache[digest] = chunk
			e.mu.Unlock()

			// Record the chunk in the metadata store with an initial refcount.
			chunkInfo := &metadata.ChunkInfo{
				Digest:   digest,
				Size:     int64(n),
				Offset:   offset,
				RefCount: 1,
			}
			if err := e.metaDB.SaveChunk(ctx, digest, chunkInfo); err != nil {
				return fmt.Errorf("failed to save chunk metadata: %w", err)
			}
		} else {
			// Chunk already stored; just bump its reference count.
			if err := e.metaDB.IncrementChunkRef(ctx, digest); err != nil {
				return fmt.Errorf("failed to increment chunk ref: %w", err)
			}
		}

		offset += int64(n)
	}

	return nil
}

// computeChunkDigest returns the lowercase hex-encoded SHA-256 digest of data.
func (e *Engine) computeChunkDigest(data []byte) string {
	sum := sha256.Sum256(data)
	return fmt.Sprintf("%x", sum)
}

// storeChunk persists a chunk under its digest, deduplicating against the
// in-memory cache: if the digest is already cached the call is a no-op.
//
// The cached copy is cloned from data so a caller that reuses its buffer
// cannot corrupt the cache afterwards.
func (e *Engine) storeChunk(ctx context.Context, digest string, data []byte) error {
	e.mu.Lock()
	defer e.mu.Unlock()

	// Already stored — deduplicated.
	if _, exists := e.chunkCache[digest]; exists {
		return nil
	}

	// Write the chunk to disk under a two-level fan-out directory.
	chunkPath := filepath.Join(e.chunkDir, digest[:2], digest[2:])
	if err := os.MkdirAll(filepath.Dir(chunkPath), 0o755); err != nil {
		return err
	}

	if err := os.WriteFile(chunkPath, data, 0o644); err != nil {
		return err
	}

	// Cache a private copy; the caller retains ownership of data.
	e.chunkCache[digest] = append([]byte(nil), data...)

	return nil
}

// GetChunk returns the data for the chunk with the given digest, consulting
// the in-memory cache first and falling back to the on-disk chunk store.
func (e *Engine) GetChunk(ctx context.Context, digest string) ([]byte, error) {
	e.mu.RLock()
	defer e.mu.RUnlock()

	// Fast path: in-memory cache.
	if data, exists := e.chunkCache[digest]; exists {
		return data, nil
	}

	// Slow path: read from disk. Wrap the underlying error so callers can
	// distinguish a genuinely missing chunk from e.g. a permission failure.
	chunkPath := filepath.Join(e.chunkDir, digest[:2], digest[2:])
	data, err := os.ReadFile(chunkPath)
	if err != nil {
		return nil, fmt.Errorf("chunk not found: %s: %w", digest, err)
	}

	return data, nil
}

// CleanupSnapshot releases all chunks referenced by the snapshot identified by
// key: each chunk's refcount is decremented, and chunks that drop to zero
// references are removed from disk and from the in-memory cache.
//
// Per-chunk failures are skipped (best-effort cleanup) so one bad chunk does
// not block releasing the rest.
//
// Fix: the error message previously referenced an undefined variable
// (snapshotKey) — the parameter is named key — which did not compile.
func (e *Engine) CleanupSnapshot(ctx context.Context, key string) error {
	// List every chunk this snapshot references.
	chunks, err := e.metaDB.ListChunksBySnapshot(key)
	if err != nil {
		return fmt.Errorf("failed to list chunks for snapshot %s: %w", key, err)
	}

	for _, chunk := range chunks {
		// Re-read the current refcount from the metadata store.
		info, err := e.metaDB.GetChunkInfo(ctx, chunk.Digest)
		if err != nil || info == nil {
			continue // best-effort: skip chunks we cannot inspect
		}

		// Drop this snapshot's reference.
		newRefCount := info.RefCount - 1
		if err := e.metaDB.UpdateChunkRef(ctx, chunk.Digest, newRefCount); err != nil {
			continue
		}

		// No references remain: remove the chunk from disk and cache.
		if newRefCount <= 0 {
			chunkPath := filepath.Join(e.chunkDir, chunk.Digest[:2], chunk.Digest[2:])
			if err := os.Remove(chunkPath); err != nil {
				// Best-effort: leave the cache entry so the chunk stays
				// servable, and move on to the next chunk.
				continue
			}

			e.mu.Lock()
			delete(e.chunkCache, chunk.Digest)
			e.mu.Unlock()
		}
	}

	return nil
}

// VerifyChunk reports whether data hashes to the expected digest, i.e. whether
// the chunk's contents are intact.
func (e *Engine) VerifyChunk(ctx context.Context, digest string, data []byte) bool {
	return e.computeChunkDigest(data) == digest
}

// ComputeStats aggregates deduplication statistics from the metadata store:
// total chunk count, total logical size, size held by single-reference chunks,
// bytes saved by deduplication, and the overall dedup ratio.
//
// Fix: with zero chunks the ratio was 0/0, producing NaN; it is now reported
// as 0.
func (e *Engine) ComputeStats(ctx context.Context) (map[string]interface{}, error) {
	e.mu.RLock()
	defer e.mu.RUnlock()

	chunks, err := e.metaDB.ListChunks(ctx)
	if err != nil {
		return nil, err
	}

	totalSize := int64(0)
	uniqueSize := int64(0)
	deduped := int64(0)

	for _, chunk := range chunks {
		totalSize += chunk.Size
		if chunk.RefCount == 1 {
			uniqueSize += chunk.Size
		} else {
			// Each reference beyond the first is a stored copy we avoided.
			deduped += chunk.Size * int64(chunk.RefCount-1)
		}
	}

	// Guard against division by zero (empty store) which yields NaN.
	ratio := 0.0
	if totalSize > 0 {
		ratio = float64(deduped) / float64(totalSize)
	}

	stats := map[string]interface{}{
		"total_chunks":  len(chunks),
		"total_size":    totalSize,
		"unique_size":   uniqueSize,
		"deduped_bytes": deduped,
		"dedup_ratio":   ratio,
	}

	return stats, nil
}
