// Package storage provides block-based storage implementation
package storage

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"hash/crc32"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/smart-snapshotter/pkg/types"
)

// BlockStorageEngine provides block-based storage operations on top of
// BaseBackend, adding fixed-size chunking, content deduplication, and
// reference counting backed by a pluggable metadata store.
type BlockStorageEngine struct {
	*BaseBackend
	blockSize       int             // Chunk size in bytes used by SplitFile
	metadataBackend MetadataBackend // Stores refcounts and checksums per block
}

// BlockConfig represents block storage configuration.
// It embeds the base backend Config and adds block-specific settings.
type BlockConfig struct {
	Config
	BlockSize       int    // Block size in bytes (default 4KB)
	MetadataBackend string // Type of metadata backend (sqlite, boltdb, file)
}

// BlockInfo represents block information produced by SplitFile and
// consumed by ReconstructFile.
type BlockInfo struct {
	Hash      string // Block identifier used to store/load the block
	Size      int64  // Block length in bytes (unprocessed)
	Offset    int64  // Byte offset of the block within the original file
	CRC32     uint32 // CRC32 (IEEE) checksum of the block data
	Reference int64  // Reference count; not populated by SplitFile
}

// BlockMetadata represents block metadata persisted (as JSON) in the
// metadata backend alongside the flat key/value fields.
type BlockMetadata struct {
	Hash      string            `json:"hash"`       // Block identifier
	Size      int64             `json:"size"`       // Unprocessed payload size in bytes
	Offset    int64             `json:"offset"`     // Byte offset; not set by StoreBlock
	CRC32     uint32            `json:"crc32"`      // CRC32 (IEEE) of the unprocessed data
	Reference int64             `json:"reference"`  // Reference count for deduplication
	CreatedAt int64             `json:"created_at"` // Unix timestamp of first store
	Checksum  map[string]string `json:"checksum"`   // Named checksums, e.g. "crc32"
}

// NewBlockStorageEngine creates a new block-based storage backend.
//
// It initializes the base backend, applies a 4KB default block size when
// none is configured, prepares the on-disk directory layout, and opens
// the configured metadata backend.
func NewBlockStorageEngine(config BlockConfig) (*BlockStorageEngine, error) {
	base := NewBaseBackend(config.Config)
	if err := base.Initialize(); err != nil {
		return nil, fmt.Errorf("failed to initialize base backend: %w", err)
	}

	// Fall back to a 4KB block size when unset or invalid.
	if config.BlockSize <= 0 {
		config.BlockSize = 4096
	}

	// Ensure the directories holding block data and metadata exist.
	for _, dir := range []string{"blocks", "block_metadata"} {
		if err := os.MkdirAll(filepath.Join(base.basePath, dir), 0755); err != nil {
			return nil, fmt.Errorf("failed to create block directory %s: %w", dir, err)
		}
	}

	// Open the metadata store used for refcounts and checksums.
	dbPath := filepath.Join(base.basePath, "block_metadata", "blocks.db")
	meta, err := NewMetadataBackend(config.MetadataBackend, dbPath)
	if err != nil {
		return nil, fmt.Errorf("failed to create metadata backend: %w", err)
	}

	return &BlockStorageEngine{
		BaseBackend:     base,
		blockSize:       config.BlockSize,
		metadataBackend: meta,
	}, nil
}

// StoreBlock stores a block with deduplication.
//
// If a block with the same hash already exists on disk, its reference
// count is incremented instead of rewriting the data. New blocks are
// written atomically (temp file + rename) before their metadata is
// recorded; on any failure after the write, the partial/new file is
// removed so block data and metadata stay consistent.
func (bs *BlockStorageEngine) StoreBlock(ctx context.Context, hash string, data []byte) error {
	// Checksum over the raw (unprocessed) payload; LoadBlock verifies
	// against this value after decompression/decryption.
	crc := crc32.ChecksumIEEE(data)

	// Deduplicate: an existing block only needs its refcount bumped.
	if exists, err := bs.BlockExists(ctx, hash); err != nil {
		return fmt.Errorf("failed to check block existence: %w", err)
	} else if exists {
		return bs.incrementReference(ctx, hash)
	}

	blockPath := bs.GetBlockPath(hash)
	if err := os.MkdirAll(filepath.Dir(blockPath), 0755); err != nil {
		return fmt.Errorf("failed to create block directory: %w", err)
	}

	// Apply configured transformations (compression, encryption).
	processedData, err := bs.processData(data)
	if err != nil {
		return fmt.Errorf("failed to process data: %w", err)
	}

	// Write-then-rename so readers never observe a partial block.
	tempFile := blockPath + ".tmp"
	if err := os.WriteFile(tempFile, processedData, 0644); err != nil {
		os.Remove(tempFile) // clean up a possibly partial temp file
		return fmt.Errorf("failed to write block: %w", err)
	}
	if err := os.Rename(tempFile, blockPath); err != nil {
		os.Remove(tempFile) // clean up temp file
		return fmt.Errorf("failed to rename block: %w", err)
	}

	// Structured metadata, stored serialized under the "metadata" key.
	metadata := BlockMetadata{
		Hash:      hash,
		Size:      int64(len(data)), // logical (unprocessed) size
		CRC32:     crc,
		Reference: 1,
		CreatedAt: time.Now().Unix(),
		Checksum: map[string]string{
			"crc32": fmt.Sprintf("%08x", crc),
		},
	}

	metadataJSON, err := json.Marshal(metadata)
	if err != nil {
		return fmt.Errorf("failed to marshal metadata: %w", err)
	}

	metadataMap := map[string]string{
		"hash":      hash,
		"size":      strconv.Itoa(len(data)),
		"crc32":     fmt.Sprintf("%08x", crc),
		"reference": "1",
		"metadata":  string(metadataJSON),
	}

	if err := bs.metadataBackend.StoreMetadata(ctx, "block_"+hash, metadataMap); err != nil {
		// Roll back the block file so data and metadata stay in sync.
		os.Remove(blockPath)
		return fmt.Errorf("failed to store block metadata: %w", err)
	}

	// Account for the on-disk (processed) size.
	bs.UpdateStats(int64(len(processedData)), 0, 1, 1)

	return nil
}

// LoadBlock reads a block from disk, reverses any storage-time
// processing, and validates the result against the CRC32 checksum
// recorded in the metadata backend when one is available.
func (bs *BlockStorageEngine) LoadBlock(ctx context.Context, hash string) ([]byte, error) {
	raw, err := os.ReadFile(bs.GetBlockPath(hash))
	if os.IsNotExist(err) {
		return nil, types.ErrStorageNotFound
	}
	if err != nil {
		return nil, fmt.Errorf("failed to read block: %w", err)
	}

	// Reverse compression/encryption applied at store time.
	payload, err := bs.unprocessData(raw)
	if err != nil {
		return nil, fmt.Errorf("failed to unprocess data: %w", err)
	}

	// Integrity check against the checksum stored alongside the block.
	meta, err := bs.metadataBackend.LoadMetadata(ctx, "block_"+hash)
	if err != nil {
		return nil, fmt.Errorf("failed to load block metadata: %w", err)
	}

	if want := meta["crc32"]; want != "" {
		got := fmt.Sprintf("%08x", crc32.ChecksumIEEE(payload))
		if want != got {
			return nil, fmt.Errorf("CRC32 checksum mismatch: expected %s, got %s", want, got)
		}
	}

	return payload, nil
}

// DeleteBlock deletes a block, honoring reference counting.
//
// When more than one reference remains, the count is decremented and the
// data kept; only removing the final reference deletes the block file
// and its metadata. Returns types.ErrStorageNotFound when the block is
// unknown.
func (bs *BlockStorageEngine) DeleteBlock(ctx context.Context, hash string) error {
	metadata, err := bs.metadataBackend.LoadMetadata(ctx, "block_"+hash)
	if err != nil {
		if err == types.ErrStorageNotFound {
			return types.ErrStorageNotFound
		}
		return fmt.Errorf("failed to load block metadata: %w", err)
	}

	// Parse the stored reference count with explicit error handling
	// (the previous Sscanf silently yielded 0 on malformed input).
	// A missing or malformed value is treated as a single reference.
	reference := int64(1)
	if refStr := metadata["reference"]; refStr != "" {
		if parsed, perr := strconv.ParseInt(refStr, 10, 64); perr == nil {
			reference = parsed
		}
	}

	if reference > 1 {
		// Other snapshots still use this block: just decrement.
		metadata["reference"] = strconv.FormatInt(reference-1, 10)
		return bs.metadataBackend.StoreMetadata(ctx, "block_"+hash, metadata)
	}

	// Last reference: remove the data file and its metadata.
	blockPath := bs.GetBlockPath(hash)
	info, err := os.Stat(blockPath)
	if err != nil {
		if os.IsNotExist(err) {
			return types.ErrStorageNotFound
		}
		return fmt.Errorf("failed to stat block: %w", err)
	}

	if err := os.Remove(blockPath); err != nil {
		return fmt.Errorf("failed to delete block: %w", err)
	}

	if err := bs.metadataBackend.DeleteMetadata(ctx, "block_"+hash); err != nil {
		return fmt.Errorf("failed to delete block metadata: %w", err)
	}

	// Stats shrink by the on-disk (processed) size.
	bs.UpdateStats(-info.Size(), 0, -1, -1)

	return nil
}

// BlockExists reports whether the block file for hash is present on disk.
func (bs *BlockStorageEngine) BlockExists(ctx context.Context, hash string) (bool, error) {
	_, err := os.Stat(bs.GetBlockPath(hash))
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, fmt.Errorf("failed to stat block: %w", err)
	}
}

// incrementReference bumps the stored reference count for an existing
// block. A missing or malformed count is treated as 1 before the
// increment (the previous Sscanf silently yielded 0 on malformed input).
func (bs *BlockStorageEngine) incrementReference(ctx context.Context, hash string) error {
	metadata, err := bs.metadataBackend.LoadMetadata(ctx, "block_"+hash)
	if err != nil {
		return fmt.Errorf("failed to load block metadata: %w", err)
	}

	reference := int64(1)
	if refStr := metadata["reference"]; refStr != "" {
		if parsed, perr := strconv.ParseInt(refStr, 10, 64); perr == nil {
			reference = parsed
		}
	}

	metadata["reference"] = strconv.FormatInt(reference+1, 10)
	return bs.metadataBackend.StoreMetadata(ctx, "block_"+hash, metadata)
}

// SplitFile splits data into fixed-size blocks and stores each one.
//
// Block identity is the SHA-256 of the block's content, so identical
// chunks deduplicate regardless of their position in the file. (The
// previous offset/size-based name collided for different content at the
// same position, breaking deduplication correctness.) Returns one
// BlockInfo per stored block, in file order; empty input yields an
// empty slice.
func (bs *BlockStorageEngine) SplitFile(ctx context.Context, data []byte) ([]BlockInfo, error) {
	if len(data) == 0 {
		return []BlockInfo{}, nil
	}

	// Pre-size the result to the exact number of chunks.
	blocks := make([]BlockInfo, 0, (len(data)+bs.blockSize-1)/bs.blockSize)

	for offset := int64(0); offset < int64(len(data)); {
		// The final block may be shorter than the configured size.
		blockSize := int64(bs.blockSize)
		if remaining := int64(len(data)) - offset; remaining < blockSize {
			blockSize = remaining
		}

		blockData := data[offset : offset+blockSize]

		// Content-addressed hash: identical data maps to the same block,
		// which StoreBlock deduplicates via reference counting.
		sum := sha256.Sum256(blockData)
		hash := fmt.Sprintf("%x", sum)

		if err := bs.StoreBlock(ctx, hash, blockData); err != nil {
			return nil, fmt.Errorf("failed to store block at offset %d: %w", offset, err)
		}

		blocks = append(blocks, BlockInfo{
			Hash:   hash,
			Size:   blockSize,
			Offset: offset,
			CRC32:  crc32.ChecksumIEEE(blockData),
		})
		offset += blockSize
	}

	return blocks, nil
}

// ReconstructFile reassembles a file from its ordered block list.
//
// Each block is loaded by hash and validated against the size recorded
// in its BlockInfo before being appended. An empty block list yields an
// empty (non-nil) byte slice.
func (bs *BlockStorageEngine) ReconstructFile(ctx context.Context, blocks []BlockInfo) ([]byte, error) {
	if len(blocks) == 0 {
		return []byte{}, nil
	}

	// Pre-size the output buffer to the sum of all block sizes.
	var total int64
	for i := range blocks {
		total += blocks[i].Size
	}

	out := make([]byte, 0, total)
	for _, info := range blocks {
		data, err := bs.LoadBlock(ctx, info.Hash)
		if err != nil {
			return nil, fmt.Errorf("failed to load block %s: %w", info.Hash, err)
		}

		// Guard against truncated or corrupted blocks.
		if int64(len(data)) != info.Size {
			return nil, fmt.Errorf("block size mismatch for %s: expected %d, got %d",
				info.Hash, info.Size, len(data))
		}

		out = append(out, data...)
	}

	return out, nil
}

// Compact performs block compaction.
//
// NOTE(review): currently a placeholder that always succeeds without
// doing any work.
func (bs *BlockStorageEngine) Compact(ctx context.Context) error {
	// This is a simplified implementation
	// In a real implementation, this would:
	// 1. Identify unused blocks (reference count = 0)
	// 2. Reorganize block storage
	// 3. Update metadata

	return nil
}

// GetBlockStats returns block storage statistics as a generic map,
// combining the base backend counters with block-level derived values.
func (bs *BlockStorageEngine) GetBlockStats(ctx context.Context) (map[string]interface{}, error) {
	base, err := bs.GetStats(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get storage stats: %w", err)
	}

	stats := map[string]interface{}{
		"total_blocks":     base.BlockCount,
		"total_block_size": base.UsedSize,
		"block_size":       bs.blockSize,
	}

	// Average stored-block size; only meaningful with at least one block.
	if base.BlockCount > 0 {
		stats["avg_block_size"] = base.UsedSize / base.BlockCount
	}

	return stats, nil
}

// VerifyBlocks verifies all blocks integrity.
//
// NOTE(review): placeholder — always returns nil. A full implementation
// would iterate every stored block and compare its data against the
// checksum recorded in the metadata backend.
func (bs *BlockStorageEngine) VerifyBlocks(ctx context.Context) error {
	// This would iterate through all blocks and verify their checksums
	// For now, this is a placeholder
	return nil
}

// Close releases resources held by the block storage, closing the
// metadata backend when one was opened.
func (bs *BlockStorageEngine) Close() error {
	if bs.metadataBackend == nil {
		return nil
	}
	return bs.metadataBackend.Close()
}