package main

import (
	"context"
	"fmt"
	"log"

	"github.com/smart-snapshotter/pkg/dedup"
)

// MockStorageBackend implements a simple storage backend for demonstration
type MockStorageBackend struct {
	blocks map[string][]byte
	files  map[string][]byte
}

func NewMockStorageBackend() *MockStorageBackend {
	return &MockStorageBackend{
		blocks: make(map[string][]byte),
		files:  make(map[string][]byte),
	}
}

func (m *MockStorageBackend) StoreBlock(ctx context.Context, hash string, data []byte) error {
	m.blocks[hash] = data
	return nil
}

func (m *MockStorageBackend) LoadBlock(ctx context.Context, hash string) ([]byte, error) {
	data, exists := m.blocks[hash]
	if !exists {
		return nil, fmt.Errorf("block not found: %s", hash)
	}
	return data, nil
}

func (m *MockStorageBackend) DeleteBlock(ctx context.Context, hash string) error {
	delete(m.blocks, hash)
	return nil
}

func (m *MockStorageBackend) BlockExists(ctx context.Context, hash string) (bool, error) {
	_, exists := m.blocks[hash]
	return exists, nil
}

func (m *MockStorageBackend) StoreFile(ctx context.Context, hash string, data []byte) error {
	m.files[hash] = data
	return nil
}

func (m *MockStorageBackend) LoadFile(ctx context.Context, hash string) ([]byte, error) {
	data, exists := m.files[hash]
	if !exists {
		return nil, fmt.Errorf("file not found: %s", hash)
	}
	return data, nil
}

func (m *MockStorageBackend) DeleteFile(ctx context.Context, hash string) error {
	delete(m.files, hash)
	return nil
}

func (m *MockStorageBackend) FileExists(ctx context.Context, hash string) (bool, error) {
	_, exists := m.files[hash]
	return exists, nil
}

func (m *MockStorageBackend) StoreMetadata(ctx context.Context, key string, metadata []byte) error {
	return nil
}

func (m *MockStorageBackend) LoadMetadata(ctx context.Context, key string) ([]byte, error) {
	return nil, nil
}

func (m *MockStorageBackend) DeleteMetadata(ctx context.Context, key string) error {
	return nil
}

func (m *MockStorageBackend) Health(ctx context.Context) error {
	return nil
}

// main demonstrates the dedup package end to end: file-level deduplication,
// direct block processing, file reconstruction, duplicate-block detection,
// statistics reporting, and a comparison of chunking algorithms — all backed
// by the in-memory MockStorageBackend.
func main() {
	ctx := context.Background()

	// Create storage backend.
	storage := NewMockStorageBackend()

	// Configure the block deduplication engine. Literal is gofmt-aligned
	// (the original mixed two alignment widths, which gofmt rejects).
	config := dedup.Config{
		Enabled:           true,
		Algorithm:         "block",
		HashAlgorithm:     "blake3",
		ChunkingAlgorithm: "rabin",
		MinChunkSize:      512,
		MaxChunkSize:      8192,
		AvgChunkSize:      2048,
		MinFileSize:       1024,
		Storage:           storage,
	}

	// Create deduplication engine.
	engine, err := dedup.NewEngine(config)
	if err != nil {
		log.Fatalf("Failed to create deduplication engine: %v", err)
	}
	defer engine.Close()

	fmt.Println("=== Block-Level Deduplication Example ===")
	fmt.Println()

	// Example 1: Process a file with block deduplication.
	fmt.Println("1. Processing file with block deduplication...")

	// Create sample file data.
	fileData := []byte(`This is a sample file for block deduplication testing.
It contains multiple lines of text that will be processed using variable-size chunking.
The Rabin chunking algorithm will identify natural boundaries in the content.
This approach is more efficient than fixed-size chunking for many types of data.
Block-level deduplication can find duplicate content even within files.
It works by splitting files into chunks and identifying duplicate chunks.
This can significantly reduce storage requirements for similar files.`)

	filePath := "example.txt"

	// Process the file.
	fileHash, err := engine.AddFile(ctx, filePath, fileData)
	if err != nil {
		log.Fatalf("Failed to add file: %v", err)
	}

	fmt.Printf("   File processed successfully\n")
	fmt.Printf("   File hash: %s\n", fileHash)
	fmt.Printf("   Original size: %d bytes\n", len(fileData))
	fmt.Println()

	// Example 2: Process blocks directly.
	fmt.Println("2. Processing blocks directly...")

	blockData := []byte(`This is a test block for direct block processing.
It will be split into chunks using the configured chunking algorithm.`)

	blocks, err := engine.ProcessBlocks(ctx, blockData)
	if err != nil {
		log.Fatalf("Failed to process blocks: %v", err)
	}

	fmt.Printf("   Processed %d blocks\n", len(blocks))
	for i, block := range blocks {
		fmt.Printf("   Block %d: hash=%s, size=%d, offset=%d\n",
			i+1, block.Hash, block.Size, block.Offset)
	}
	fmt.Println()

	// Example 3: Reconstruct the file from its blocks.
	fmt.Println("3. Reconstructing file from blocks...")

	reconstructed, err := engine.ReconstructFile(ctx, blocks)
	if err != nil {
		log.Fatalf("Failed to reconstruct file: %v", err)
	}

	fmt.Printf("   File reconstructed successfully\n")
	fmt.Printf("   Reconstructed size: %d bytes\n", len(reconstructed))
	fmt.Printf("   Data matches original: %t\n", string(reconstructed) == string(blockData))
	fmt.Println()

	// Example 4: Block duplicate detection.
	fmt.Println("4. Testing duplicate block detection...")

	// Create similar data with some overlap.
	similarData := []byte(`This is a test block for direct block processing.
It will be split into chunks using the configured chunking algorithm.
This additional line makes the content slightly different.`)

	similarBlocks, err := engine.ProcessBlocks(ctx, similarData)
	if err != nil {
		log.Fatalf("Failed to process similar blocks: %v", err)
	}

	// Count blocks of the original data whose hash also appears in the
	// similar data. A hash set makes this O(n+m) instead of the original
	// nested O(n*m) scan; the count is identical.
	similarHashes := make(map[string]struct{}, len(similarBlocks))
	for _, b := range similarBlocks {
		similarHashes[b.Hash] = struct{}{}
	}
	sharedBlocks := 0
	for _, b := range blocks {
		if _, ok := similarHashes[b.Hash]; ok {
			sharedBlocks++
		}
	}

	fmt.Printf("   Found %d shared blocks between similar files\n", sharedBlocks)
	fmt.Printf("   Original blocks: %d, Similar blocks: %d\n", len(blocks), len(similarBlocks))
	fmt.Println()

	// Example 5: Get deduplication statistics.
	fmt.Println("5. Deduplication statistics...")

	stats, err := engine.GetStats(ctx)
	if err != nil {
		log.Fatalf("Failed to get stats: %v", err)
	}

	fmt.Printf("   Total blocks: %d\n", stats.TotalBlocks)
	fmt.Printf("   Unique blocks: %d\n", stats.UniqueBlocks)
	fmt.Printf("   Total data size: %d bytes\n", stats.TotalDataSize)
	fmt.Printf("   Saved space: %d bytes\n", stats.SavedSpace)
	fmt.Printf("   Deduplication ratio: %.2f%%\n", stats.DeduplicationRatio*100)
	fmt.Printf("   Hash algorithm: %s\n", stats.HashAlgorithm)
	fmt.Printf("   Block size: %d bytes\n", stats.BlockSize)
	fmt.Println()

	// Example 6: Compare how different chunkers split the same data.
	fmt.Println("6. Testing different chunking algorithms...")

	algorithms := []struct {
		name    string
		chunker dedup.Chunker
	}{
		{"Fixed Size", dedup.NewFixedSizeChunker(1024)},
		{"Rabin", dedup.NewRabinChunker(512, 4096, 1024)},
		{"BuzHash", dedup.NewBuzHashChunker(512, 4096, 1024)},
		{"Content Defined", dedup.NewContentDefinedChunker(512, 4096, 1024)},
	}

	testData := []byte(`This is test data for comparing different chunking algorithms.
Each algorithm will split this data differently based on its own logic.
Some use fixed sizes, others use content-based boundaries.`)

	for _, algo := range algorithms {
		chunks := algo.chunker.Chunk(testData)
		fmt.Printf("   %s: %d chunks\n", algo.name, len(chunks))

		// Chunk sizes should sum back to the original length.
		totalSize := 0
		for _, chunk := range chunks {
			totalSize += int(chunk.Size)
		}
		fmt.Printf("     Total size: %d bytes (original: %d bytes)\n", totalSize, len(testData))
	}
	fmt.Println()

	fmt.Println("=== Block-Level Deduplication Example Complete ===")
}