package dedup

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"
)

// BlockDedupEngine represents a block-level deduplication engine with advanced features
// (content-defined chunking, a block metadata index, and duplicate consolidation).
type BlockDedupEngine struct {
	*BaseEngine              // embedded base engine: hashing, stats, refcounts, storage (declared elsewhere in the package)
	chunker     Chunker      // splits input data into (possibly variable-size) chunks
	blockIndex  BlockIndex   // metadata store for every known block
	mu          sync.RWMutex // NOTE(review): not taken by any method visible in this file — confirm intended use
}

// Ensure BlockDedupEngine implements Engine interface
var _ Engine = (*BlockDedupEngine)(nil)

// Chunker interface for different chunking algorithms
// (fixed-size, Rabin fingerprinting, BuzHash — see NewBlockDedupEngine).
type Chunker interface {
	// Chunk splits data into contiguous chunks covering the whole input.
	Chunk(data []byte) []Chunk
	// GetName returns a human-readable identifier for the algorithm.
	GetName() string
}

// Chunk represents a data chunk produced by a Chunker.
type Chunk struct {
	Data   []byte // raw chunk bytes
	Offset int64  // byte offset of the chunk within the source data
	Size   int64  // chunk length in bytes
}

// BlockIndex manages block metadata and relationships.
// Implementations (e.g. the in-memory index created by NewMemoryBlockIndex)
// are keyed by the block's content hash.
type BlockIndex interface {
	AddBlock(block BlockMetadata) error
	GetBlock(hash string) (*BlockMetadata, error)
	UpdateBlock(block BlockMetadata) error
	DeleteBlock(hash string) error
	// FindSimilarBlocks returns blocks whose similarity to hash meets threshold.
	FindSimilarBlocks(hash string, threshold float64) ([]BlockMetadata, error)
	GetBlockStats() BlockStats
}

// BlockMetadata contains metadata for a single deduplicated block.
// Fields are aligned per gofmt (the original declaration was not gofmt-clean).
type BlockMetadata struct {
	Hash           string // content hash, primary key in the block index
	Size           int64  // block size in bytes
	Offset         int64  // offset of the block within its source data
	ReferenceCount int64  // number of live references to this block
	LastAccess     int64  // Unix seconds of the most recent read access
	CreatedAt      int64  // Unix seconds when the block was first stored
	Checksum       string // integrity checksum (currently same value as Hash — see calculateChecksum)
	Features       []byte // feature vector for similarity detection
}

// BlockStats contains block-level statistics reported by a BlockIndex.
type BlockStats struct {
	TotalBlocks   int64   // total block references (including duplicates)
	UniqueBlocks  int64   // distinct blocks actually stored
	TotalSize     int64   // logical size across all references, bytes
	UniqueSize    int64   // physical size of stored blocks, bytes
	AverageSize   float64 // mean block size, bytes
	DuplicateRate float64 // fraction of blocks that were duplicates
}

// NewBlockDedupEngine creates a new block-level deduplication engine.
// The chunking algorithm is selected from config.ChunkingAlgorithm;
// Rabin fingerprinting is used for any unrecognized value.
func NewBlockDedupEngine(config Config) (*BlockDedupEngine, error) {
	base := NewBaseEngine(config)

	// Pick the chunker; "rabin" and unknown names both resolve to Rabin.
	var chunker Chunker
	switch config.ChunkingAlgorithm {
	case "fixed":
		chunker = NewFixedSizeChunker(config.BlockSize)
	case "buzhash":
		chunker = NewBuzHashChunker(config.MinChunkSize, config.MaxChunkSize, config.AvgChunkSize)
	default: // includes "rabin"
		chunker = NewRabinChunker(config.MinChunkSize, config.MaxChunkSize, config.AvgChunkSize)
	}

	index, err := NewMemoryBlockIndex()
	if err != nil {
		return nil, fmt.Errorf("failed to create block index: %w", err)
	}

	engine := &BlockDedupEngine{
		BaseEngine: base,
		chunker:    chunker,
		blockIndex: index,
	}
	return engine, nil
}

// ProcessFile processes a file using variable-size block deduplication.
//
// The data is split by the configured chunker; each new chunk is stored and
// indexed, while a chunk whose hash is already known only has its reference
// count bumped. The returned FileBlocks lists every chunk in file order and
// carries the total file size. Returns an error for empty input or on any
// storage/index failure.
func (e *BlockDedupEngine) ProcessFile(ctx context.Context, filePath string, data []byte) (*FileBlocks, error) {
	if len(data) == 0 {
		return nil, fmt.Errorf("empty file data")
	}

	// Create chunks using the configured chunker
	chunks := e.chunker.Chunk(data)

	fileBlocks := &FileBlocks{
		FilePath: filePath,
		Blocks:   make([]BlockInfo, 0, len(chunks)),
		// Fix: Size was previously never populated; record the real file size.
		Size: int64(len(data)),
	}

	// Process each chunk
	for _, chunk := range chunks {
		// Honor cancellation between chunks so large files can be aborted.
		if err := ctx.Err(); err != nil {
			return nil, err
		}

		blockHash := e.CalculateHash(chunk.Data)

		// Check if block already exists
		exists, err := e.storage.BlockExists(ctx, blockHash)
		if err != nil {
			return nil, fmt.Errorf("failed to check block existence: %w", err)
		}

		if !exists {
			// Store the block
			if err := e.storage.StoreBlock(ctx, blockHash, chunk.Data); err != nil {
				return nil, fmt.Errorf("failed to store block: %w", err)
			}

			// Add to block index
			now := getCurrentTimestamp()
			blockMeta := BlockMetadata{
				Hash:           blockHash,
				Size:           chunk.Size,
				Offset:         chunk.Offset,
				ReferenceCount: 1,
				CreatedAt:      now,
				// Fix: set LastAccess on creation, consistent with AddBlock.
				LastAccess: now,
				Checksum:   e.calculateChecksum(chunk.Data),
			}

			if err := e.blockIndex.AddBlock(blockMeta); err != nil {
				return nil, fmt.Errorf("failed to add block to index: %w", err)
			}

			// New unique block: logical bytes seen, no savings, one unique block.
			e.UpdateStats(chunk.Size, 0, 0, 1)
		} else {
			// Block already exists, update reference count
			blockMeta, err := e.blockIndex.GetBlock(blockHash)
			if err != nil {
				return nil, fmt.Errorf("failed to get block metadata: %w", err)
			}

			blockMeta.ReferenceCount++
			if err := e.blockIndex.UpdateBlock(*blockMeta); err != nil {
				return nil, fmt.Errorf("failed to update block metadata: %w", err)
			}

			// Update statistics - we saved space by reusing existing block
			e.UpdateStats(chunk.Size, chunk.Size, 0, 0)
		}

		// Add block info to file blocks
		fileBlocks.Blocks = append(fileBlocks.Blocks, BlockInfo{
			Hash:   blockHash,
			Size:   chunk.Size,
			Offset: chunk.Offset,
		})
	}

	return fileBlocks, nil
}

// ReconstructFileFromBlocks reconstructs a file from FileBlocks.
// Exported wrapper around the internal reconstruction routine.
func (e *BlockDedupEngine) ReconstructFileFromBlocks(ctx context.Context, fileBlocks *FileBlocks) ([]byte, error) {
	return e.reconstructFileFromBlocks(ctx, fileBlocks)
}

// FindDuplicateBlocks finds duplicate blocks across the system.
// When the underlying index natively supports duplicate detection it is
// delegated to; otherwise a manual scan is used as a fallback.
func (e *BlockDedupEngine) FindDuplicateBlocks(ctx context.Context, threshold int64) ([]DuplicateBlockGroup, error) {
	// Optional capability of the index implementation.
	type duplicateFinder interface {
		FindDuplicates(int64) ([]DuplicateBlockGroup, error)
	}

	if finder, ok := e.blockIndex.(duplicateFinder); ok {
		return finder.FindDuplicates(threshold)
	}

	return e.findDuplicatesManually(ctx, threshold)
}

// findDuplicatesManually is the fallback duplicate scan used when the block
// index has no native duplicate detection. It is currently a stub that
// reports no duplicates.
func (e *BlockDedupEngine) findDuplicatesManually(ctx context.Context, threshold int64) ([]DuplicateBlockGroup, error) {
	// TODO: iterate all indexed blocks and group them by hash/size.
	groups := make([]DuplicateBlockGroup, 0)
	return groups, nil
}

// OptimizeStorage performs storage optimization by finding and consolidating
// duplicate blocks larger than a fixed threshold.
func (e *BlockDedupEngine) OptimizeStorage(ctx context.Context) error {
	// Blocks smaller than 1KB are not worth consolidating.
	const minBlockSize = 1024

	groups, err := e.FindDuplicateBlocks(ctx, minBlockSize)
	if err != nil {
		return fmt.Errorf("failed to find duplicate blocks: %w", err)
	}

	for _, g := range groups {
		if err := e.consolidateDuplicateBlocks(ctx, g); err != nil {
			return fmt.Errorf("failed to consolidate duplicate blocks: %w", err)
		}
	}

	return nil
}

// consolidateDuplicateBlocks consolidates a group of duplicate blocks.
// The first hash in the group is kept as the canonical block; every other
// block has its references redirected, then is removed from both storage
// and the block index.
func (e *BlockDedupEngine) consolidateDuplicateBlocks(ctx context.Context, group DuplicateBlockGroup) error {
	if len(group.Hashes) <= 1 {
		return nil // No duplicates to consolidate
	}

	// Keep the first block as the canonical block
	canonicalHash := group.Hashes[0]

	// Update all references to point to the canonical block
	for _, hash := range group.Hashes[1:] {
		if err := e.updateBlockReferences(ctx, hash, canonicalHash); err != nil {
			return fmt.Errorf("failed to update block references: %w", err)
		}

		// Delete the duplicate block
		if err := e.storage.DeleteBlock(ctx, hash); err != nil {
			return fmt.Errorf("failed to delete duplicate block: %w", err)
		}

		// Fix: also drop the duplicate's metadata so the index does not keep
		// a dangling entry for a block that no longer exists in storage.
		if err := e.blockIndex.DeleteBlock(hash); err != nil {
			return fmt.Errorf("failed to delete duplicate block from index: %w", err)
		}
	}

	return nil
}

// updateBlockReferences updates all references from oldHash to newHash.
//
// Placeholder: a real implementation requires a reverse index mapping blocks
// to the files that reference them, which this engine does not yet maintain.
// It currently succeeds without doing anything.
func (e *BlockDedupEngine) updateBlockReferences(ctx context.Context, oldHash, newHash string) error {
	// This would require maintaining a reverse index of which files reference which blocks
	// For now, this is a placeholder that would be implemented with proper metadata tracking
	return nil
}

// GetBlockStats returns detailed block statistics
// as reported by the underlying block index.
func (e *BlockDedupEngine) GetBlockStats() BlockStats {
	return e.blockIndex.GetBlockStats()
}

// AddBlock adds a block to the deduplication engine (Engine interface implementation).
// A new block is stored and indexed with ReferenceCount 1; a known block has
// its indexed reference count incremented. In both cases the engine-level
// reference count is bumped and the block's content hash is returned.
func (e *BlockDedupEngine) AddBlock(ctx context.Context, data []byte) (string, error) {
	// Idiomatic empty check (was `int64(len(data)) == 0`).
	if len(data) == 0 {
		return "", fmt.Errorf("empty block")
	}

	// Hoisted: the block size was converted from len(data) in four places.
	size := int64(len(data))
	hash := e.CalculateHash(data)

	// Check if block already exists
	exists, err := e.storage.BlockExists(ctx, hash)
	if err != nil {
		return "", fmt.Errorf("failed to check block existence: %w", err)
	}

	if !exists {
		// Store the block
		if err := e.storage.StoreBlock(ctx, hash, data); err != nil {
			return "", fmt.Errorf("failed to store block: %w", err)
		}

		// Add to block index
		blockMeta := BlockMetadata{
			Hash:           hash,
			Size:           size,
			Offset:         0,
			ReferenceCount: 1,
			CreatedAt:      getCurrentTimestamp(),
			LastAccess:     getCurrentTimestamp(),
			Checksum:       e.calculateChecksum(data),
		}

		if err := e.blockIndex.AddBlock(blockMeta); err != nil {
			return "", fmt.Errorf("failed to add block to index: %w", err)
		}

		// New unique block: logical bytes seen, no savings, one unique block.
		e.UpdateStats(size, 0, 0, 1)
	} else {
		// Block already exists, update reference count
		blockMeta, err := e.blockIndex.GetBlock(hash)
		if err != nil {
			return "", fmt.Errorf("failed to get block metadata: %w", err)
		}

		blockMeta.ReferenceCount++
		if err := e.blockIndex.UpdateBlock(*blockMeta); err != nil {
			return "", fmt.Errorf("failed to update block metadata: %w", err)
		}

		// Update statistics - we saved space by reusing existing block
		e.UpdateStats(size, size, 0, 0)
	}

	// Update the engine-level reference count tracked by the base engine.
	e.UpdateRefCount(hash, 1)

	return hash, nil
}

// GetBlock retrieves a block from the deduplication engine (Engine interface implementation).
// Delegates directly to the storage backend; no index update is performed here.
func (e *BlockDedupEngine) GetBlock(ctx context.Context, hash string) ([]byte, error) {
	return e.storage.LoadBlock(ctx, hash)
}

// DeleteBlock deletes a block from the deduplication engine (Engine interface implementation).
// The engine-level reference count is decremented first; the block and its
// index entry are removed from storage only once no references remain.
// Otherwise the index entry's ReferenceCount is synced best-effort.
func (e *BlockDedupEngine) DeleteBlock(ctx context.Context, hash string) error {
	// Update reference count
	e.UpdateRefCount(hash, -1)

	// If no more references, delete the block
	if e.GetRefCount(hash) <= 0 {
		if err := e.storage.DeleteBlock(ctx, hash); err != nil {
			return fmt.Errorf("failed to delete block: %w", err)
		}

		// Remove from block index
		if err := e.blockIndex.DeleteBlock(hash); err != nil {
			return fmt.Errorf("failed to delete block from index: %w", err)
		}

		// Update statistics
		e.UpdateStats(0, 0, 0, -1)
	} else {
		// Block still referenced, just update reference count in index.
		// NOTE(review): both the GetBlock lookup error and the UpdateBlock
		// error are silently dropped — confirm this best-effort sync is intended.
		if block, err := e.blockIndex.GetBlock(hash); err == nil {
			block.ReferenceCount = e.GetRefCount(hash)
			e.blockIndex.UpdateBlock(*block)
		}
	}

	return nil
}

// BlockExists checks if a block exists in the deduplication engine (Engine interface implementation).
// Delegates directly to the storage backend; the block index is not consulted.
func (e *BlockDedupEngine) BlockExists(ctx context.Context, hash string) (bool, error) {
	return e.storage.BlockExists(ctx, hash)
}

// ProcessBlocks processes data into blocks (Engine interface implementation).
// It reuses the ProcessFile pipeline with an empty file path and returns
// only the resulting block list.
func (e *BlockDedupEngine) ProcessBlocks(ctx context.Context, data []byte) ([]BlockInfo, error) {
	fb, err := e.ProcessFile(ctx, "", data)
	if err != nil {
		return nil, err
	}

	return fb.Blocks, nil
}

// ReconstructFile reconstructs a file from blocks (Engine interface implementation).
// The block list is wrapped in a FileBlocks value and handed to the internal
// reconstruction routine.
func (e *BlockDedupEngine) ReconstructFile(ctx context.Context, blocks []BlockInfo) ([]byte, error) {
	return e.reconstructFileFromBlocks(ctx, &FileBlocks{Blocks: blocks})
}

// reconstructFileFromBlocks rebuilds the original byte stream from a set of
// blocks: blocks are sorted by offset, fetched from storage, verified against
// their recorded content hash, and concatenated. Each fetched block's
// LastAccess is refreshed in the index on a best-effort basis.
func (e *BlockDedupEngine) reconstructFileFromBlocks(ctx context.Context, fileBlocks *FileBlocks) ([]byte, error) {
	if fileBlocks == nil || len(fileBlocks.Blocks) == 0 {
		return nil, fmt.Errorf("invalid file blocks")
	}

	// Sort blocks by offset to ensure correct order
	sortBlocks(fileBlocks.Blocks)

	// Pre-allocate buffer for better performance
	totalSize := int64(0)
	for _, block := range fileBlocks.Blocks {
		totalSize += block.Size
	}
	result := make([]byte, 0, totalSize)

	// Reconstruct file from blocks
	for _, blockInfo := range fileBlocks.Blocks {
		blockData, err := e.GetBlock(ctx, blockInfo.Hash)
		if err != nil {
			return nil, fmt.Errorf("failed to get block %s: %w", blockInfo.Hash, err)
		}

		// Verify block integrity against the content hash directly.
		// Fix: the original compared calculateChecksum(blockData) with the
		// hash, which is only valid while the checksum function happens to
		// be the same as CalculateHash; comparing the hash itself keeps the
		// check correct if calculateChecksum ever changes algorithm.
		if e.CalculateHash(blockData) != blockInfo.Hash {
			return nil, fmt.Errorf("block integrity check failed for block %s", blockInfo.Hash)
		}

		result = append(result, blockData...)

		// Update last access time (best-effort; errors are ignored).
		if blockMeta, err := e.blockIndex.GetBlock(blockInfo.Hash); err == nil {
			blockMeta.LastAccess = getCurrentTimestamp()
			e.blockIndex.UpdateBlock(*blockMeta)
		}
	}

	return result, nil
}

// ProcessSnapshot processes a snapshot for deduplication (Engine interface implementation).
// Every regular file under path is read and fed through ProcessFile, keyed by
// its path relative to the snapshot root. The walk aborts early when ctx is
// canceled.
func (e *BlockDedupEngine) ProcessSnapshot(ctx context.Context, snapshotID string, path string) error {
	// Walk through all files in the snapshot
	return filepath.Walk(path, func(filePath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Fix: stop the walk promptly when the context is canceled.
		if err := ctx.Err(); err != nil {
			return err
		}

		// Fix: skip directories and non-regular files (symlinks, FIFOs,
		// sockets, devices) — os.ReadFile on a FIFO can block indefinitely.
		if info.IsDir() || !info.Mode().IsRegular() {
			return nil
		}

		// Read file data
		data, err := os.ReadFile(filePath)
		if err != nil {
			return fmt.Errorf("failed to read file %s: %w", filePath, err)
		}

		// Process file for deduplication
		relativePath, err := filepath.Rel(path, filePath)
		if err != nil {
			return fmt.Errorf("failed to get relative path: %w", err)
		}

		_, err = e.ProcessFile(ctx, relativePath, data)
		if err != nil {
			return fmt.Errorf("failed to process file %s: %w", relativePath, err)
		}

		return nil
	})
}

// RemoveSnapshot removes a snapshot (Engine interface implementation).
//
// Placeholder: it only prints the snapshot ID to stdout; block cleanup is
// expected to be handled by a separate garbage-collection pass.
// NOTE(review): fmt.Printf in library code — consider a logger instead.
func (e *BlockDedupEngine) RemoveSnapshot(ctx context.Context, snapshotID string) error {
	// For now, just log the removal
	// Actual cleanup should be handled by garbage collection
	fmt.Printf("Removing snapshot: %s\n", snapshotID)
	return nil
}

// GetSnapshotUsage returns snapshot usage information (Engine interface implementation).
//
// Placeholder: always reports zero usage until per-snapshot tracking exists.
func (e *BlockDedupEngine) GetSnapshotUsage(ctx context.Context, snapshotID string) (StorageUsage, error) {
	// For now, return dummy usage
	// This should be implemented with proper tracking
	return StorageUsage{
		Size:   0,
		Inodes: 0,
	}, nil
}

// GetStats returns deduplication statistics (Engine interface implementation).
// NOTE(review): e.stats (from the embedded BaseEngine) is read without any
// visible lock here — confirm the base engine makes this safe for concurrent use.
func (e *BlockDedupEngine) GetStats(ctx context.Context) (DedupStats, error) {
	return e.stats, nil
}

// Cleanup performs cleanup operations (Engine interface implementation).
// It delegates to the block index when the implementation exposes an
// optional Cleanup method; otherwise there is nothing to do.
func (e *BlockDedupEngine) Cleanup(ctx context.Context) error {
	type indexCleaner interface {
		Cleanup() error
	}

	if c, ok := e.blockIndex.(indexCleaner); ok {
		return c.Cleanup()
	}

	return nil
}

// Close closes the deduplication engine (Engine interface implementation).
// The engine holds no resources that need explicit release, so this is a no-op.
func (e *BlockDedupEngine) Close() error {
	// No specific cleanup needed
	return nil
}

// AddFile adds a file to the deduplication engine (Engine interface implementation).
// For block-level deduplication the file is split into blocks via ProcessFile,
// and a file-level hash is derived from the concatenated block hashes.
// Files smaller than config.MinFileSize are skipped and return an empty hash.
func (e *BlockDedupEngine) AddFile(ctx context.Context, path string, data []byte) (string, error) {
	if int64(len(data)) < e.config.MinFileSize {
		// File too small for deduplication, return empty hash
		return "", nil
	}

	// Process file as blocks
	fileBlocks, err := e.ProcessFile(ctx, path, data)
	if err != nil {
		return "", fmt.Errorf("failed to process file as blocks: %w", err)
	}

	// Fix: build the combined hash with strings.Builder — the previous
	// `combinedHash += block.Hash` loop is quadratic in the number of blocks.
	var combined strings.Builder
	for _, block := range fileBlocks.Blocks {
		combined.WriteString(block.Hash)
	}

	// Hash the combined block hashes to get a file-level hash
	return e.CalculateHash([]byte(combined.String())), nil
}

// GetFile retrieves a file from the deduplication engine (Engine interface implementation).
//
// Not supported in block mode: the engine keeps no mapping from file-level
// hashes back to their block lists, so retrieval by file hash always errors.
func (e *BlockDedupEngine) GetFile(ctx context.Context, hash string) ([]byte, error) {
	// This is a simplified implementation
	// In practice, you'd need to store metadata mapping file hashes to blocks
	return nil, fmt.Errorf("file-level retrieval not supported in block deduplication mode")
}

// DeleteFile deletes a file from the deduplication engine (Engine interface implementation).
//
// Not supported in block mode: without a file-hash-to-blocks mapping the
// engine cannot tell which blocks belong to the file, so this always errors.
func (e *BlockDedupEngine) DeleteFile(ctx context.Context, hash string) error {
	// This is a simplified implementation
	// In practice, you'd need to store metadata mapping file hashes to blocks
	return fmt.Errorf("file-level deletion not supported in block deduplication mode")
}

// DeleteFileByPath deletes a file by path from the deduplication engine (Engine interface implementation).
//
// Not supported in block mode: no path-to-blocks mapping is maintained,
// so this always errors.
func (e *BlockDedupEngine) DeleteFileByPath(ctx context.Context, path string) error {
	// This is a simplified implementation
	// In practice, you'd need to store metadata mapping file paths to blocks
	return fmt.Errorf("file-level deletion by path not supported in block deduplication mode")
}

// FileExists checks if a file exists in the deduplication engine (Engine interface implementation).
//
// Not supported in block mode: file-level lookups always error because no
// file-hash metadata is kept.
func (e *BlockDedupEngine) FileExists(ctx context.Context, hash string) (bool, error) {
	// This is a simplified implementation
	// In practice, you'd need to store metadata mapping file hashes to blocks
	return false, fmt.Errorf("file-level existence check not supported in block deduplication mode")
}

// calculateChecksum calculates a checksum for block integrity verification.
// Currently it simply delegates to the engine's content hash, so Checksum and
// Hash carry the same value; a cheaper algorithm (CRC32/Adler32) could be
// substituted later. NOTE(review): reconstructFileFromBlocks compares this
// value against the block hash — keep the two in sync if the algorithm changes.
func (e *BlockDedupEngine) calculateChecksum(data []byte) string {
	// Use a fast checksum algorithm like CRC32 or Adler32
	// For now, use the same hash function for simplicity
	return e.CalculateHash(data)
}

// getCurrentTimestamp reports the current wall-clock time as Unix seconds.
func getCurrentTimestamp() int64 {
	now := time.Now()
	return now.Unix()
}

// FileBlocks represents a file composed of blocks.
type FileBlocks struct {
	FilePath string      // path of the source file (may be empty for raw data)
	Blocks   []BlockInfo // ordered block list making up the file
	Size     int64       // total file size in bytes; NOTE(review): not all producers in this file populate it — confirm before relying on it
	Checksum string      // whole-file checksum; NOTE(review): not populated by the code visible here
}

// DuplicateBlockGroup represents a group of duplicate blocks.
// Consumers treat Hashes[0] as the canonical block (see consolidateDuplicateBlocks).
type DuplicateBlockGroup struct {
	Hashes    []string // hashes of the blocks in this group
	Size      int64    // size of each block in the group, bytes
	Count     int      // number of blocks in the group
	Frequency int64    // presumably how often the content occurs — confirm with the index implementation
}