package dedup

import (
	"context"
	"fmt"
	"math"
	"sort"
	"sync"
	"time"
)

// DedupStrategy represents intelligent deduplication decision making
type DedupStrategy interface {
	// ShouldDedup determines if deduplication should be applied to given data
	ShouldDedup(ctx context.Context, metadata DedupMetadata) (bool, error)

	// SelectAlgorithm selects the optimal deduplication algorithm
	SelectAlgorithm(ctx context.Context, metadata DedupMetadata) (string, error)

	// GetChunkingConfig returns optimal chunking configuration
	GetChunkingConfig(ctx context.Context, metadata DedupMetadata) (*ChunkingConfig, error)

	// UpdateStrategy updates strategy based on performance metrics
	UpdateStrategy(ctx context.Context, metrics PerformanceMetrics) error

	// GetStrategyStats returns strategy statistics
	GetStrategyStats() StrategyStats
}

// DedupMetadata contains metadata for deduplication decision
type DedupMetadata struct {
	FilePath     string
	FileSize     int64
	FileType     string // file extension or MIME type
	ContentType  string // detected content type
	AccessTime   time.Time
	ModifiedTime time.Time
	Entropy      float64 // data entropy for randomness detection
	Similarity   float64 // similarity to existing data
	Priority     int     // processing priority
	Tags         []string
}

// ChunkingConfig represents optimal chunking configuration
type ChunkingConfig struct {
	Algorithm      string // "fixed", "rabin", "buzhash", "content-defined"
	MinChunkSize   int64
	MaxChunkSize   int64
	AvgChunkSize   int64
	BlockSize      int64
	EnableFeatures []string // features to enable
}

// PerformanceMetrics contains performance data for strategy optimization
type PerformanceMetrics struct {
	Timestamp        time.Time
	Algorithm        string
	ProcessingTime   time.Duration
	MemoryUsage      int64
	CPUUsage         float64
	DeduplicationRatio float64
	Throughput       float64 // bytes per second
	ErrorRate        float64
	CacheHitRate     float64
}

// StrategyStats contains strategy statistics
type StrategyStats struct {
	TotalDecisions      int64
	SuccessfulDedups    int64
	FailedDedups        int64
	AverageRatio        float64
	AlgorithmDistribution map[string]int64
	LastUpdated         time.Time
}

// SmartDedupStrategy implements intelligent deduplication strategy
type SmartDedupStrategy struct {
	mu              sync.RWMutex
	config          StrategyConfig
	stats           StrategyStats
	performanceLog  []PerformanceMetrics
	algorithmStats  map[string]*AlgorithmStats
	contentPatterns map[string]*ContentPattern
	thresholds      ThresholdConfig
}

// StrategyConfig contains strategy configuration
type StrategyConfig struct {
	Enabled                    bool
	MinFileSize               int64
	MaxFileSize               int64
	MinEntropy                float64
	MaxEntropy                float64
	MinSimilarity             float64
	TargetRatio               float64
	MaxProcessingTime         time.Duration
	MaxMemoryUsage            int64
	EnableAdaptive            bool
	LearningRate              float64
	UpdateInterval            time.Duration
	PerformanceWindowSize     int
}

// AlgorithmStats contains statistics for each algorithm
type AlgorithmStats struct {
	UsageCount       int64
	TotalProcessingTime time.Duration
	AverageRatio     float64
	SuccessRate      float64
	AverageThroughput float64
	LastUsed         time.Time
	Score            float64
}

// ContentPattern represents content type patterns
type ContentPattern struct {
	Type             string
	EntropyRange     [2]float64
	OptimalAlgorithm string
	OptimalChunkSize int64
	SuccessRate      float64
	AverageRatio     float64
}

// ThresholdConfig contains threshold configurations
type ThresholdConfig struct {
	MinFileSize       int64
	MaxFileSize       int64
	MinEntropy        float64
	MaxEntropy        float64
	MinSimilarity     float64
	MaxProcessingTime time.Duration
	MaxErrorRate      float64
}

// NewSmartDedupStrategy creates a new smart deduplication strategy
func NewSmartDedupStrategy(config StrategyConfig) (*SmartDedupStrategy, error) {
	if !config.Enabled {
		return nil, fmt.Errorf("strategy is disabled")
	}

	strategy := &SmartDedupStrategy{
		config:          config,
		stats:           StrategyStats{},
		performanceLog:  make([]PerformanceMetrics, 0, config.PerformanceWindowSize),
		algorithmStats:  make(map[string]*AlgorithmStats),
		contentPatterns: make(map[string]*ContentPattern),
		thresholds:      getDefaultThresholds(),
	}

	// Initialize algorithm statistics
	algorithms := []string{"fixed", "rabin", "buzhash", "content-defined"}
	for _, algo := range algorithms {
		strategy.algorithmStats[algo] = &AlgorithmStats{
			Score: 1.0, // Initial neutral score
		}
	}

	// Initialize content patterns
	strategy.initializeContentPatterns()

	return strategy, nil
}

// ShouldDedup determines if deduplication should be applied
func (s *SmartDedupStrategy) ShouldDedup(ctx context.Context, metadata DedupMetadata) (bool, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.stats.TotalDecisions++

	// Check basic thresholds
	if !s.checkBasicThresholds(metadata) {
		s.stats.FailedDedups++
		return false, nil
	}

	// Calculate deduplication probability
	probability := s.calculateDedupProbability(metadata)

	// Make decision based on probability and thresholds
	shouldDedup := probability >= s.config.TargetRatio

	if shouldDedup {
		s.stats.SuccessfulDedups++
	}

	return shouldDedup, nil
}

// SelectAlgorithm selects the optimal deduplication algorithm
func (s *SmartDedupStrategy) SelectAlgorithm(ctx context.Context, metadata DedupMetadata) (string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// Check content-specific patterns first
	if pattern, exists := s.contentPatterns[metadata.ContentType]; exists {
		if pattern.SuccessRate > 0.7 {
			return pattern.OptimalAlgorithm, nil
		}
	}

	// Use adaptive algorithm selection
	if s.config.EnableAdaptive {
		algorithm := s.selectAdaptiveAlgorithm(metadata)
		return algorithm, nil
	}

	// Fallback to rule-based selection
	return s.selectRuleBasedAlgorithm(metadata)
}

// GetChunkingConfig returns optimal chunking configuration
func (s *SmartDedupStrategy) GetChunkingConfig(ctx context.Context, metadata DedupMetadata) (*ChunkingConfig, error) {
	algorithm, err := s.SelectAlgorithm(ctx, metadata)
	if err != nil {
		return nil, fmt.Errorf("failed to select algorithm: %w", err)
	}

	config := &ChunkingConfig{
		Algorithm: algorithm,
	}

	// Optimize chunk sizes based on file characteristics
	switch algorithm {
	case "fixed":
		config.BlockSize = s.optimizeFixedChunkSize(metadata)
		config.MinChunkSize = config.BlockSize
		config.MaxChunkSize = config.BlockSize
		config.AvgChunkSize = config.BlockSize
	case "rabin", "buzhash", "content-defined":
		config.MinChunkSize, config.MaxChunkSize, config.AvgChunkSize = s.optimizeVariableChunkSizes(metadata)
	}

	// Enable features based on content type
	config.EnableFeatures = s.selectFeatures(metadata)

	return config, nil
}

// UpdateStrategy updates strategy based on performance metrics
func (s *SmartDedupStrategy) UpdateStrategy(ctx context.Context, metrics PerformanceMetrics) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Add to performance log
	s.performanceLog = append(s.performanceLog, metrics)
	if len(s.performanceLog) > s.config.PerformanceWindowSize {
		s.performanceLog = s.performanceLog[1:]
	}

	// Update algorithm statistics
	if algoStats, exists := s.algorithmStats[metrics.Algorithm]; exists {
		s.updateAlgorithmStats(algoStats, metrics)
	}

	// Update content patterns if applicable
	s.updateContentPatterns(metrics)

	// Recalculate algorithm scores
	if s.config.EnableAdaptive {
		s.recalculateAlgorithmScores()
	}

	s.stats.LastUpdated = time.Now()
	return nil
}

// GetStrategyStats returns strategy statistics
func (s *SmartDedupStrategy) GetStrategyStats() StrategyStats {
	s.mu.RLock()
	defer s.mu.RUnlock()

	stats := s.stats
	stats.AlgorithmDistribution = make(map[string]int64)

	for algo, algoStats := range s.algorithmStats {
		stats.AlgorithmDistribution[algo] = algoStats.UsageCount
	}

	if s.stats.SuccessfulDedups > 0 {
		stats.AverageRatio = float64(s.stats.SuccessfulDedups) / float64(s.stats.TotalDecisions)
	}

	return stats
}

// checkBasicThresholds checks basic deduplication thresholds
func (s *SmartDedupStrategy) checkBasicThresholds(metadata DedupMetadata) bool {
	// File size check
	if metadata.FileSize < s.thresholds.MinFileSize || metadata.FileSize > s.thresholds.MaxFileSize {
		return false
	}

	// Entropy check (avoid highly random data)
	if metadata.Entropy < s.thresholds.MinEntropy || metadata.Entropy > s.thresholds.MaxEntropy {
		return false
	}

	// Similarity check
	if metadata.Similarity < s.thresholds.MinSimilarity {
		return false
	}

	return true
}

// calculateDedupProbability calculates deduplication probability
func (s *SmartDedupStrategy) calculateDedupProbability(metadata DedupMetadata) float64 {
	// Base probability from similarity
	probability := metadata.Similarity

	// Adjust based on file size (larger files benefit more from dedup)
	sizeFactor := math.Min(float64(metadata.FileSize)/float64(1024*1024), 1.0) // Cap at 1MB
	probability *= (0.5 + 0.5*sizeFactor)

	// Adjust based on entropy (medium entropy is best for dedup)
	entropyFactor := 1.0 - math.Abs(metadata.Entropy-0.5)*2 // Peak at 0.5 entropy
	probability *= entropyFactor

	// Adjust based on file type
	typeFactor := s.getFileTypeFactor(metadata.FileType)
	probability *= typeFactor

	// Adjust based on access pattern
	accessFactor := s.getAccessPatternFactor(metadata)
	probability *= accessFactor

	return math.Min(probability, 1.0)
}

// selectAdaptiveAlgorithm selects algorithm using adaptive learning
func (s *SmartDedupStrategy) selectAdaptiveAlgorithm(metadata DedupMetadata) string {
	bestAlgorithm := "rabin" // Default
	bestScore := -1.0

	for algo, algoStats := range s.algorithmStats {
		score := algoStats.Score

		// Boost score for algorithms that work well with this content type
		if pattern, exists := s.contentPatterns[metadata.ContentType]; exists {
			if pattern.OptimalAlgorithm == algo {
				score *= 1.2
			}
		}

		// Adjust based on file characteristics
		if metadata.FileSize < 64*1024 && algo == "fixed" {
			score *= 1.1 // Small files work well with fixed chunking
		}
		if metadata.Entropy > 0.7 && (algo == "rabin" || algo == "content-defined") {
			score *= 1.15 // High entropy benefits from content-aware chunking
		}

		if score > bestScore {
			bestScore = score
			bestAlgorithm = algo
		}
	}

	// Update usage count
	if algoStats, exists := s.algorithmStats[bestAlgorithm]; exists {
		algoStats.UsageCount++
		algoStats.LastUsed = time.Now()
	}

	return bestAlgorithm
}

// selectRuleBasedAlgorithm selects algorithm using predefined rules
func (s *SmartDedupStrategy) selectRuleBasedAlgorithm(metadata DedupMetadata) (string, error) {
	// Rule-based selection logic
	switch {
	case metadata.FileSize < 64*1024:
		return "fixed", nil // Small files: fixed chunking
	case metadata.ContentType == "text/plain" || metadata.ContentType == "text/html":
		return "rabin", nil // Text content: Rabin chunking
	case metadata.Entropy > 0.8:
		return "content-defined", nil // High entropy: content-defined chunking
	case metadata.FileType == ".tar" || metadata.FileType == ".gz":
		return "buzhash", nil // Compressed archives: BuzHash chunking
	default:
		return "rabin", nil // Default: Rabin chunking
	}
}

// optimizeFixedChunkSize optimizes fixed chunk size
func (s *SmartDedupStrategy) optimizeFixedChunkSize(metadata DedupMetadata) int64 {
	baseSize := int64(4096) // Default 4KB

	// Adjust based on file size
	if metadata.FileSize < 1024*1024 { // < 1MB
		baseSize = 1024 // 1KB for small files
	} else if metadata.FileSize > 100*1024*1024 { // > 100MB
		baseSize = 8192 // 8KB for large files
	}

	// Adjust based on content type
	switch metadata.ContentType {
	case "text/plain":
		baseSize = 2048 // 2KB for text files
	case "application/octet-stream":
		baseSize = 4096 // 4KB for binary files
	case "image/jpeg", "image/png":
		baseSize = 8192 // 8KB for images
	}

	return baseSize
}

// optimizeVariableChunkSizes optimizes variable chunk sizes
func (s *SmartDedupStrategy) optimizeVariableChunkSizes(metadata DedupMetadata) (minSize, maxSize, avgSize int64) {
	// Default sizes
	minSize = 512   // 512 bytes minimum
	maxSize = 16384 // 16KB maximum
	avgSize = 4096  // 4KB average

	// Adjust based on file characteristics
	if metadata.Entropy > 0.7 {
		// High entropy: smaller chunks for better deduplication
		minSize = 256
		maxSize = 8192
		avgSize = 2048
	} else if metadata.Entropy < 0.3 {
		// Low entropy: larger chunks
		minSize = 1024
		maxSize = 32768
		avgSize = 8192
	}

	// Adjust based on file size
	if metadata.FileSize > 100*1024*1024 { // > 100MB
		avgSize = 8192 // Larger average for large files
	}

	return minSize, maxSize, avgSize
}

// selectFeatures selects features to enable based on content
func (s *SmartDedupStrategy) selectFeatures(metadata DedupMetadata) []string {
	var features []string

	// Always enable basic features
	features = append(features, "reference_counting", "integrity_check")

	// Content-specific features
	switch metadata.ContentType {
	case "text/plain", "text/html", "text/xml":
		features = append(features, "similarity_detection", "delta_encoding")
	case "application/json", "application/xml":
		features = append(features, "structure_aware", "similarity_detection")
	case "image/jpeg", "image/png", "image/gif":
		features = append(features, "perceptual_hashing")
	case "application/gzip", "application/zip":
		features = append(features, "compression_aware")
	}

	// Performance features for large files
	if metadata.FileSize > 10*1024*1024 { // > 10MB
		features = append(features, "parallel_processing", "streaming")
	}

	return features
}

// updateAlgorithmStats updates algorithm statistics
func (s *SmartDedupStrategy) updateAlgorithmStats(algoStats *AlgorithmStats, metrics PerformanceMetrics) {
	algoStats.TotalProcessingTime += metrics.ProcessingTime
	algoStats.AverageRatio = (algoStats.AverageRatio*float64(algoStats.UsageCount) + metrics.DeduplicationRatio) / float64(algoStats.UsageCount+1)
	algoStats.AverageThroughput = (algoStats.AverageThroughput*float64(algoStats.UsageCount) + metrics.Throughput) / float64(algoStats.UsageCount+1)

	if metrics.ErrorRate < 0.01 { // Success if error rate < 1%
		algoStats.SuccessRate = (algoStats.SuccessRate*float64(algoStats.UsageCount) + 1.0) / float64(algoStats.UsageCount+1)
	} else {
		algoStats.SuccessRate = (algoStats.SuccessRate*float64(algoStats.UsageCount)) / float64(algoStats.UsageCount+1)
	}

	algoStats.UsageCount++
}

// updateContentPatterns updates content patterns based on metrics
func (s *SmartDedupStrategy) updateContentPatterns(metrics PerformanceMetrics) {
	// This would analyze successful deduplication patterns and update content patterns
	// For now, this is a placeholder for the learning mechanism
}

// recalculateAlgorithmScores recalculates algorithm scores based on performance
func (s *SmartDedupStrategy) recalculateAlgorithmScores() {
	for _, algoStats := range s.algorithmStats {
		// Score based on multiple factors
		score := 0.0

		// Success rate component (40%)
		score += algoStats.SuccessRate * 0.4

		// Deduplication ratio component (30%)
		ratioScore := math.Min(algoStats.AverageRatio, 0.8) / 0.8
		score += ratioScore * 0.3

		// Throughput component (20%)
		throughputScore := math.Min(algoStats.AverageThroughput/(10*1024*1024), 1.0) // Normalize to 10MB/s
		score += throughputScore * 0.2

		// Recency component (10%)
		recencyScore := 1.0
		if !algoStats.LastUsed.IsZero() {
			hoursSinceLastUse := time.Since(algoStats.LastUsed).Hours()
			recencyScore = math.Max(0.1, 1.0-hoursSinceLastUse/168) // Decay over a week
		}
		score += recencyScore * 0.1

		algoStats.Score = score
	}
}

// Helper functions

// getFileTypeFactor returns an empirical effectiveness multiplier for the
// file's extension: above 1.0 favors deduplication, below 1.0 discourages it.
func (s *SmartDedupStrategy) getFileTypeFactor(fileType string) float64 {
	switch fileType {
	case ".tar", ".gz", ".zip", ".rar":
		// Already-compressed archives rarely contain duplicate chunks.
		return 0.3
	case ".jpg", ".jpeg", ".png", ".gif", ".mp3", ".mp4":
		// Compressed media is mostly unique byte-wise.
		return 0.4
	case ".exe", ".dll", ".so":
		// Binary executables have limited redundancy.
		return 0.6
	case ".txt", ".log", ".conf", ".json", ".xml", ".yaml", ".yml":
		// Plain-text formats deduplicate well.
		return 1.2
	}
	return 1.0 // unknown types: neutral factor
}

// getAccessPatternFactor returns a multiplier based on access recency:
// recently accessed files are assumed more likely to have duplicates.
func (s *SmartDedupStrategy) getAccessPatternFactor(metadata DedupMetadata) float64 {
	age := time.Since(metadata.AccessTime)

	switch {
	case age < 24*time.Hour:
		return 1.1 // accessed within the last day
	case age < 7*24*time.Hour:
		return 1.0 // accessed within the last week
	default:
		return 0.9 // stale access
	}
}

// initializeContentPatterns seeds the pattern table with defaults for
// well-known content types; these bias SelectAlgorithm before any learning.
func (s *SmartDedupStrategy) initializeContentPatterns() {
	seeds := []*ContentPattern{
		{
			Type:             "text/plain",
			EntropyRange:     [2]float64{0.3, 0.8},
			OptimalAlgorithm: "rabin",
			OptimalChunkSize: 2048,
			SuccessRate:      0.85,
			AverageRatio:     0.6,
		},
		{
			Type:             "application/octet-stream",
			EntropyRange:     [2]float64{0.7, 1.0},
			OptimalAlgorithm: "content-defined",
			OptimalChunkSize: 4096,
			SuccessRate:      0.7,
			AverageRatio:     0.3,
		},
	}

	for _, p := range seeds {
		s.contentPatterns[p.Type] = p
	}
}

// getDefaultThresholds returns the built-in accept/reject thresholds used
// when a strategy is constructed.
func getDefaultThresholds() ThresholdConfig {
	const (
		kb = int64(1024)
		gb = kb * kb * kb
	)

	return ThresholdConfig{
		MinFileSize:       kb,       // skip files under 1KB
		MaxFileSize:       100 * gb, // skip files over 100GB
		MinEntropy:        0.1,
		MaxEntropy:        0.95,
		MinSimilarity:     0.1,
		MaxProcessingTime: 5 * time.Minute,
		MaxErrorRate:      0.05, // tolerate up to 5% errors
	}
}