package dedup

import (
	"context"
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// ConcurrentProcessor handles concurrent deduplication processing.
// SmartConcurrentProcessor is the reference implementation in this package.
type ConcurrentProcessor interface {
	// ProcessFilesConcurrently processes multiple files concurrently.
	// Results are returned in the same order as the input slice, matched by task ID.
	ProcessFilesConcurrently(ctx context.Context, files []FileTask) ([]ConcurrentProcessingResult, error)

	// ProcessBlocksConcurrently processes blocks concurrently.
	// Results are returned in the same order as the input slice, matched by task ID.
	ProcessBlocksConcurrently(ctx context.Context, blocks []BlockTask) ([]BlockResult, error)

	// ProcessSnapshotsConcurrently processes snapshots concurrently.
	// Results are returned in the same order as the input slice, matched by task ID.
	ProcessSnapshotsConcurrently(ctx context.Context, snapshots []SnapshotTask) ([]SnapshotResult, error)

	// GetConcurrencyStats returns a snapshot of concurrency statistics.
	GetConcurrencyStats() ConcurrencyStats

	// SetMaxConcurrency sets the maximum concurrency level.
	// Implementations return an error for non-positive values.
	SetMaxConcurrency(maxConcurrency int) error
}

// FileTask represents a file processing task.
type FileTask struct {
	ID          string // unique task identifier; results are matched back to callers by this ID
	FilePath    string // path of the file being processed
	Data        []byte // raw file contents; its length is reported as the result Size
	Priority    int // scheduling hint — NOTE(review): not consulted by the current worker loop
	Metadata    map[string]interface{} // caller-supplied annotations, passed through unmodified
	Timeout     time.Duration // per-task budget — NOTE(review): not enforced by the current workers
}

// BlockTask represents a block processing task.
type BlockTask struct {
	ID         string // unique task identifier; results are matched back by this ID
	BlockData  []byte // raw block bytes
	BlockHash  string // precomputed hash of the block, echoed into the result
	Offset     int64 // byte offset of the block within its source
	Size       int64 // block size in bytes; echoed into the result
	Priority   int // scheduling hint — NOTE(review): not consulted by the current worker loop
	Metadata   map[string]interface{} // caller-supplied annotations, passed through unmodified
	Timeout    time.Duration // per-task budget — NOTE(review): not enforced by the current workers
}

// SnapshotTask represents a snapshot processing task.
type SnapshotTask struct {
	ID          string // unique task identifier; results are matched back by this ID
	SnapshotID  string // identifier of the snapshot to process, echoed into the result
	Path        string // filesystem path of the snapshot
	Priority    int // scheduling hint — NOTE(review): not consulted by the current worker loop
	Metadata    map[string]interface{} // caller-supplied annotations, passed through unmodified
	Timeout     time.Duration // per-task budget — NOTE(review): not enforced by the current workers
}

// ConcurrentProcessingResult represents a concurrent file processing result.
type ConcurrentProcessingResult struct {
	TaskID      string // ID of the originating FileTask
	FileHash    string // hash of the whole file
	BlockHashes []string // hashes of the file's constituent blocks
	Size        int64 // processed size in bytes (len of the task's Data)
	Duration    time.Duration // wall time spent processing this task
	Error       error // per-task failure (e.g. context cancellation); nil on success
	Metrics     ProcessingMetrics // performance sample for this task
}

// BlockResult represents a block processing result.
type BlockResult struct {
	TaskID     string // ID of the originating BlockTask
	BlockHash  string // hash echoed from the task
	Exists     bool // whether the block was already present (deduplicated)
	Stored     bool // whether the block was written to storage
	Size       int64 // block size in bytes, echoed from the task
	Duration   time.Duration // wall time spent processing this task
	Error      error // per-task failure; nil on success
	Metrics    BlockMetrics // performance sample for this task
}

// SnapshotResult represents a snapshot processing result.
type SnapshotResult struct {
	TaskID       string // ID of the originating SnapshotTask
	SnapshotID   string // snapshot identifier echoed from the task
	FileCount    int // number of files contained in the snapshot
	TotalSize    int64 // total snapshot size in bytes
	DedupRatio   float64 // fraction of data deduplicated (0.0 - 1.0)
	Duration     time.Duration // wall time spent processing this task
	Error        error // per-task failure; nil on success
	Metrics      SnapshotMetrics // performance sample for this task
}

// ProcessingMetrics contains processing performance metrics for a file task.
type ProcessingMetrics struct {
	CPUUsage     float64 // CPU utilization fraction during processing (0.0 - 1.0)
	MemoryUsage  int64 // peak memory used, in bytes
	Throughput   float64 // bytes per second (as computed by the simulated worker: size / elapsed seconds)
	ChunkingTime time.Duration // time spent splitting the file into chunks
	HashingTime  time.Duration // time spent hashing
	StorageTime  time.Duration // time spent persisting data
}

// BlockMetrics contains block processing metrics.
type BlockMetrics struct {
	CPUUsage    float64 // CPU utilization fraction during processing (0.0 - 1.0)
	MemoryUsage int64 // peak memory used, in bytes
	Throughput  float64 // bytes per second (size / elapsed seconds)
	HashingTime time.Duration // time spent hashing the block
	LookupTime  time.Duration // time spent checking for an existing copy
	StorageTime time.Duration // time spent persisting the block
}

// SnapshotMetrics contains snapshot processing metrics.
type SnapshotMetrics struct {
	CPUUsage      float64 // CPU utilization fraction during processing (0.0 - 1.0)
	MemoryUsage   int64 // peak memory used, in bytes
	Throughput    float64 // bytes per second (size / elapsed seconds)
	FileScanTime  time.Duration // time spent enumerating files in the snapshot
	DedupTime     time.Duration // time spent deduplicating
	TotalTime     time.Duration // end-to-end processing time
}

// ConcurrencyStats contains concurrency statistics.
//
// On the live processor, ProcessedTasks, FailedTasks and QueueSize are
// updated with sync/atomic operations; read them the same way. The struct
// returned by GetConcurrencyStats is a plain snapshot copy.
type ConcurrencyStats struct {
	ActiveWorkers    int32 // number of workers currently running
	MaxWorkers       int32 // configured maximum concurrency
	ProcessedTasks   int64 // total tasks completed (atomic counter on the live struct)
	FailedTasks      int64 // total tasks failed (atomic counter on the live struct)
	AverageLatency   time.Duration // average per-task latency
	Throughput       float64 // tasks (or bytes) per second
	QueueSize        int32 // tasks currently in flight (atomic counter on the live struct)
	WorkerUtilization float64 // fraction of pool workers currently active (0.0 - 1.0)
}

// SmartConcurrentProcessor implements intelligent concurrent deduplication
// processing: a fixed worker pool fed from a shared task queue, with
// backpressure, adaptive concurrency control and metrics collection.
//
// Lifecycle: create with NewSmartConcurrentProcessor, call Start once, then
// Stop once. Contains a mutex and a WaitGroup — must not be copied.
type SmartConcurrentProcessor struct {
	mu                    sync.RWMutex // guards maxConcurrency and workerPool
	maxConcurrency        int // configured worker count / concurrency ceiling
	currentConcurrency    int32 // live worker count; accessed atomically
	workerPool           []*ConcurrentWorker // all workers created by Start
	taskQueue            chan ConcurrentTask // shared work queue; closed by Stop
	resultQueue          chan ConcurrentTaskResult // shared result queue drained by the Process* methods
	stats                ConcurrencyStats // counters; ProcessedTasks/FailedTasks/QueueSize updated atomically
	loadBalancer         *LoadBalancer // worker-selection helper
	backpressureManager  *BackpressureManager // throttles task submission under load
	adaptiveController   *AdaptiveController // periodically tunes concurrency
	metricsCollector     *MetricsCollector // periodic metrics analysis
	shutdownChan         chan struct{} // closed by Stop to signal shutdown
	wg                   sync.WaitGroup // tracks worker goroutines
}

// ConcurrentWorker represents a concurrent worker that drains the
// processor's shared task queue.
type ConcurrentWorker struct {
	ID               int // index within the worker pool
	processor        *SmartConcurrentProcessor // owning processor (for shared stats)
	taskQueue        <-chan ConcurrentTask // receive side of the shared task queue
	resultQueue      chan<- ConcurrentTaskResult // send side of the shared result queue
	active           atomic.Bool // true while the worker loop is running
	stats            WorkerStats // per-worker counters — NOTE(review): written without synchronization by processTask
	lastTaskTime     time.Time // time the worker last handled a task
	performanceScore float64 // score used by the "performance_based" load-balancer algorithm
}

// WorkerStats contains worker performance statistics.
// NOTE(review): updated from the worker goroutine without locking; readers
// (e.g. LoadBalancer.selectLeastLoadedWorker) may observe racy values.
type WorkerStats struct {
	TasksProcessed   int64 // tasks completed by this worker
	TasksFailed      int64 // tasks that failed in this worker
	AverageLatency   time.Duration // rolling average task latency
	PeakThroughput   float64 // highest observed throughput
	LastUpdate       time.Time // timestamp of the most recent stats update
}

// LoadBalancer balances load across workers using a pluggable algorithm.
type LoadBalancer struct {
	mu              sync.RWMutex // guards workers and lastWorkerIndex
	workers         []*ConcurrentWorker // registered workers
	algorithm       string // "round_robin", "least_loaded", "performance_based"
	lastWorkerIndex int // round-robin cursor; mutated by SelectWorker
	stats           LoadBalancerStats // assignment statistics
}

// LoadBalancerStats contains load balancer statistics.
type LoadBalancerStats struct {
	TotalAssignments int64 // total worker selections made
	RebalancingCount int64 // number of rebalancing passes
	AverageLoad      float64 // mean load across workers
	MaxLoad          float64 // highest observed per-worker load
	MinLoad          float64 // lowest observed per-worker load
}

// BackpressureManager manages backpressure: submitters consult ShouldThrottle
// before enqueueing work.
type BackpressureManager struct {
	mu                sync.RWMutex // reserved for guarding config fields
	enabled           bool // whether throttling is active at all
	threshold         int // pressure level above which ShouldThrottle returns true
	currentPressure   int32 // current pressure; accessed atomically
	reactionTime       time.Duration // how quickly pressure changes take effect
	stats             BackpressureStats // activation statistics
}

// BackpressureStats contains backpressure statistics.
type BackpressureStats struct {
	Activations      int64 // times throttling was switched on
	Deactivations    int64 // times throttling was switched off
	AveragePressure  float64 // mean observed pressure
	MaxPressure      int32 // highest observed pressure
	ReactionTime     time.Duration // measured reaction latency
}

// AdaptiveController adapts concurrency based on observed performance,
// scaling the processor's max concurrency toward a target throughput.
type AdaptiveController struct {
	mu                sync.RWMutex // guards performanceWindow (and should guard stats)
	enabled           bool // when false, Start returns immediately
	targetLatency     time.Duration // desired per-task latency
	targetThroughput  float64 // desired throughput; drives scale up/down decisions
	maxWorkers        int // upper bound for scaling
	minWorkers        int // lower bound for scaling
	adjustmentRate    float64 // aggressiveness of adjustments
	performanceWindow []PerformanceSample // sliding window of recent samples (capped at 100)
	stats             AdaptiveStats // adjustment counters
}

// PerformanceSample represents a performance sample captured once per tick.
type PerformanceSample struct {
	Timestamp    time.Time // when the sample was taken
	Latency      time.Duration // average latency at sample time
	Throughput   float64 // throughput at sample time
	CPUUsage     float64 // CPU utilization fraction
	MemoryUsage  int64 // memory in use, bytes
	WorkerCount  int32 // active workers at sample time
}

// AdaptiveStats contains adaptive controller statistics.
type AdaptiveStats struct {
	Adjustments      int64 // total adjustment cycles run
	ScaleUps         int64 // cycles that increased concurrency
	ScaleDowns       int64 // cycles that decreased concurrency
	AverageLatency   time.Duration // mean latency over the window
	AverageThroughput float64 // mean throughput over the window
}

// MetricsCollector collects and analyzes metrics, pruning samples older than
// the analysis window on each analysis run.
type MetricsCollector struct {
	mu              sync.RWMutex // guards metrics and stats
	metrics         []MetricSample // collected samples, pruned by analyzeMetrics
	maxSamples      int // capacity hint for the sample buffer
	analysisWindow  time.Duration // samples older than this are dropped
	stats           MetricsStats // collector counters
}

// MetricSample represents a single named metric observation.
type MetricSample struct {
	Timestamp time.Time // when the sample was recorded
	Name      string // metric name
	Value     float64 // observed value
	Tags      map[string]string // dimension labels
}

// MetricsStats contains metrics collector statistics.
type MetricsStats struct {
	SamplesCollected int64 // total samples ingested — NOTE(review): not incremented in the visible code
	SamplesDropped   int64 // samples pruned for being older than the analysis window
	AnalysisRuns     int64 // number of analyzeMetrics invocations
	AverageLatency   time.Duration // mean analysis latency
}

// NewSmartConcurrentProcessor creates a new smart concurrent processor.
// A non-positive maxConcurrency defaults to runtime.NumCPU(). The task queue
// is buffered at 4x concurrency and the result queue at 8x so workers rarely
// block when handing results back.
func NewSmartConcurrentProcessor(maxConcurrency int) *SmartConcurrentProcessor {
	workers := maxConcurrency
	if workers <= 0 {
		workers = runtime.NumCPU()
	}

	return &SmartConcurrentProcessor{
		maxConcurrency:      workers,
		taskQueue:           make(chan ConcurrentTask, workers*4),       // larger queue for tasks
		resultQueue:         make(chan ConcurrentTaskResult, workers*8), // much larger for results
		workerPool:          make([]*ConcurrentWorker, 0, workers),
		shutdownChan:        make(chan struct{}),
		loadBalancer:        NewLoadBalancer("performance_based"),
		backpressureManager: NewBackpressureManager(true, workers*2),
		adaptiveController:  NewAdaptiveController(true, 100*time.Millisecond, 10*1024*1024, 1, workers, 0.1),
		metricsCollector:    NewMetricsCollector(1000, 5*time.Minute),
	}
}

// Start starts the concurrent processor: it spins up maxConcurrency worker
// goroutines feeding from the shared task queue and launches the adaptive
// controller and metrics collector as background services.
//
// Start should be called exactly once; callers stop the processor with Stop
// and/or by cancelling ctx.
func (p *SmartConcurrentProcessor) Start(ctx context.Context) error {
	p.mu.Lock()
	defer p.mu.Unlock()

	// Initialize workers
	for i := 0; i < p.maxConcurrency; i++ {
		worker := &ConcurrentWorker{
			ID:          i,
			processor:   p,
			taskQueue:   p.taskQueue,
			resultQueue: p.resultQueue,
		}
		p.workerPool = append(p.workerPool, worker)
		// Register with the load balancer — previously workers were never
		// added, so its selection algorithms always saw an empty pool.
		p.loadBalancer.AddWorker(worker)
		p.wg.Add(1)
		go func(w *ConcurrentWorker) {
			defer p.wg.Done()
			// Track the live worker count so GetConcurrencyStats reports a
			// meaningful ActiveWorkers value (it was previously always 0).
			atomic.AddInt32(&p.currentConcurrency, 1)
			defer atomic.AddInt32(&p.currentConcurrency, -1)
			w.Start(ctx)
		}(worker)
	}

	// Start background services; both exit when ctx is cancelled.
	go p.adaptiveController.Start(ctx, p)
	go p.metricsCollector.Start(ctx)

	return nil
}

// Stop stops the concurrent processor.
//
// Shutdown order: the task queue is closed first so workers drain remaining
// work and exit; then we wait (bounded) for them. The result queue is closed
// only after ALL workers have returned — closing it while a worker might
// still send would panic (send on closed channel), which is exactly what the
// previous implementation risked on the timeout path.
//
// NOTE(review): callers must not invoke the Process* submission methods
// concurrently with Stop — sending on the closed task queue panics.
func (p *SmartConcurrentProcessor) Stop() {
	// Close task queue first to stop accepting new tasks.
	close(p.taskQueue)

	// Send shutdown signal to any auxiliary listeners.
	close(p.shutdownChan)

	// Wait for workers with a timeout so Stop cannot hang forever.
	done := make(chan struct{})
	go func() {
		p.wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		// Graceful shutdown: no worker can send anymore, so closing the
		// result queue is safe and lets collectors observe the close.
		close(p.resultQueue)
	case <-time.After(10 * time.Second):
		// Timed out: workers may still be running and may still send
		// results. Leave the result queue open — it will be reclaimed by
		// the garbage collector along with the processor.
	}
}

// ProcessFilesConcurrently processes multiple files concurrently.
//
// Each file becomes a ConcurrentTask on the shared worker queue; results are
// matched back to their input positions by task ID, so the returned slice is
// ordered like files. Submission applies backpressure (a short sleep when the
// manager reports pressure) and is bounded by a 5s per-task timeout; overall
// collection is bounded by 30s. On timeout or cancellation during collection
// the partially-filled results slice is returned with the error.
func (p *SmartConcurrentProcessor) ProcessFilesConcurrently(ctx context.Context, files []FileTask) ([]ConcurrentProcessingResult, error) {
	results := make([]ConcurrentProcessingResult, len(files))
	resultMap := make(map[string]int, len(files))

	// Submit tasks
	for i, file := range files {
		task := ConcurrentTask{
			ID:       file.ID,
			Type:     "file",
			Data:     file,
			Priority: file.Priority,
			Timeout:  file.Timeout,
		}

		resultMap[file.ID] = i

		// Apply backpressure if needed
		if p.backpressureManager.ShouldThrottle() {
			time.Sleep(10 * time.Millisecond)
		}

		select {
		case p.taskQueue <- task:
			atomic.AddInt32(&p.stats.QueueSize, 1)
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(5 * time.Second):
			return nil, fmt.Errorf("task submission timeout")
		}
	}

	// Collect results
	collected := 0
	startTime := time.Now()
	maxDuration := 30 * time.Second // generous bound for stress tests

	for collected < len(files) {
		// Check overall timeout
		if time.Since(startTime) > maxDuration {
			return results, fmt.Errorf("processing timeout: %d/%d completed after %v", collected, len(files), time.Since(startTime))
		}

		select {
		case result, ok := <-p.resultQueue:
			if !ok {
				return results, fmt.Errorf("result queue closed: %d/%d completed", collected, len(files))
			}
			idx, exists := resultMap[result.TaskID]
			if !exists {
				continue
			}
			// Comma-ok assertion: the result queue is shared across task
			// kinds, so a bare assertion could panic on a foreign type.
			fileResult, isFile := result.Data.(ConcurrentProcessingResult)
			if !isFile {
				fileResult = ConcurrentProcessingResult{
					TaskID: result.TaskID,
					Error:  fmt.Errorf("unexpected result type %T", result.Data),
				}
			}
			results[idx] = fileResult
			collected++
			atomic.AddInt32(&p.stats.QueueSize, -1)
		case <-time.After(100 * time.Millisecond):
			// Wake periodically to re-check the overall deadline above.
			continue
		case <-ctx.Done():
			return results, ctx.Err()
		}
	}

	return results, nil
}

// ProcessBlocksConcurrently processes blocks concurrently.
//
// Mirrors ProcessFilesConcurrently: tasks go onto the shared worker queue,
// results are matched back by task ID, submission is bounded by a 5s timeout
// (previously this path could block forever on a full queue) and collection
// by 30s. On timeout or cancellation during collection the partially-filled
// results slice is returned with the error.
func (p *SmartConcurrentProcessor) ProcessBlocksConcurrently(ctx context.Context, blocks []BlockTask) ([]BlockResult, error) {
	results := make([]BlockResult, len(blocks))
	resultMap := make(map[string]int, len(blocks))

	// Submit tasks
	for i, block := range blocks {
		task := ConcurrentTask{
			ID:       block.ID,
			Type:     "block",
			Data:     block,
			Priority: block.Priority,
			Timeout:  block.Timeout,
		}

		resultMap[block.ID] = i

		select {
		case p.taskQueue <- task:
			atomic.AddInt32(&p.stats.QueueSize, 1)
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(5 * time.Second):
			// Bounded submission, consistent with ProcessFilesConcurrently.
			return nil, fmt.Errorf("task submission timeout")
		}
	}

	// Collect results
	collected := 0
	startTime := time.Now()
	maxDuration := 30 * time.Second // generous bound for stress tests

	for collected < len(blocks) {
		// Check overall timeout
		if time.Since(startTime) > maxDuration {
			return results, fmt.Errorf("processing timeout: %d/%d completed after %v", collected, len(blocks), time.Since(startTime))
		}

		select {
		case result, ok := <-p.resultQueue:
			if !ok {
				// A closed queue previously yielded zero-value reads in a
				// loop until the 30s timeout; fail fast instead.
				return results, fmt.Errorf("result queue closed: %d/%d completed", collected, len(blocks))
			}
			idx, exists := resultMap[result.TaskID]
			if !exists {
				continue
			}
			// Comma-ok assertion: a foreign result type on the shared
			// queue must not panic this collector.
			blockResult, isBlock := result.Data.(BlockResult)
			if !isBlock {
				blockResult = BlockResult{
					TaskID: result.TaskID,
					Error:  fmt.Errorf("unexpected result type %T", result.Data),
				}
			}
			results[idx] = blockResult
			collected++
			atomic.AddInt32(&p.stats.QueueSize, -1)
		case <-time.After(100 * time.Millisecond):
			// Wake periodically to re-check the overall deadline above.
			continue
		case <-ctx.Done():
			return results, ctx.Err()
		}
	}

	return results, nil
}

// ProcessSnapshotsConcurrently processes snapshots concurrently.
//
// Brought in line with the file/block variants: submission is bounded by a 5s
// timeout and collection by 30s. The previous implementation had no deadline
// at all — if a worker died or a result was dropped, it blocked until context
// cancellation, and a closed result queue spun in a hot loop forever. On
// timeout or cancellation during collection the partially-filled results
// slice is returned with the error.
func (p *SmartConcurrentProcessor) ProcessSnapshotsConcurrently(ctx context.Context, snapshots []SnapshotTask) ([]SnapshotResult, error) {
	results := make([]SnapshotResult, len(snapshots))
	resultMap := make(map[string]int, len(snapshots))

	// Submit tasks
	for i, snapshot := range snapshots {
		task := ConcurrentTask{
			ID:       snapshot.ID,
			Type:     "snapshot",
			Data:     snapshot,
			Priority: snapshot.Priority,
			Timeout:  snapshot.Timeout,
		}

		resultMap[snapshot.ID] = i

		select {
		case p.taskQueue <- task:
			atomic.AddInt32(&p.stats.QueueSize, 1)
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(5 * time.Second):
			return nil, fmt.Errorf("task submission timeout")
		}
	}

	// Collect results
	collected := 0
	startTime := time.Now()
	maxDuration := 30 * time.Second

	for collected < len(snapshots) {
		if time.Since(startTime) > maxDuration {
			return results, fmt.Errorf("processing timeout: %d/%d completed after %v", collected, len(snapshots), time.Since(startTime))
		}

		select {
		case result, ok := <-p.resultQueue:
			if !ok {
				return results, fmt.Errorf("result queue closed: %d/%d completed", collected, len(snapshots))
			}
			idx, exists := resultMap[result.TaskID]
			if !exists {
				continue
			}
			// Comma-ok assertion: the shared queue may carry other result
			// types; a bare assertion would panic.
			snapResult, isSnap := result.Data.(SnapshotResult)
			if !isSnap {
				snapResult = SnapshotResult{
					TaskID: result.TaskID,
					Error:  fmt.Errorf("unexpected result type %T", result.Data),
				}
			}
			results[idx] = snapResult
			collected++
			atomic.AddInt32(&p.stats.QueueSize, -1)
		case <-time.After(100 * time.Millisecond):
			// Wake periodically to re-check the overall deadline above.
			continue
		case <-ctx.Done():
			return results, ctx.Err()
		}
	}

	return results, nil
}

// GetConcurrencyStats returns a snapshot of concurrency statistics.
//
// Counter fields (ProcessedTasks, FailedTasks, QueueSize) are updated by
// workers with sync/atomic operations, so they are loaded atomically here —
// the previous plain struct copy of p.stats was a data race under -race.
func (p *SmartConcurrentProcessor) GetConcurrencyStats() ConcurrencyStats {
	p.mu.RLock()
	defer p.mu.RUnlock()

	var stats ConcurrencyStats
	stats.ActiveWorkers = atomic.LoadInt32(&p.currentConcurrency)
	stats.MaxWorkers = int32(p.maxConcurrency)
	stats.ProcessedTasks = atomic.LoadInt64(&p.stats.ProcessedTasks)
	stats.FailedTasks = atomic.LoadInt64(&p.stats.FailedTasks)
	stats.QueueSize = atomic.LoadInt32(&p.stats.QueueSize)
	// These two are not atomically maintained; copied under the read lock.
	stats.AverageLatency = p.stats.AverageLatency
	stats.Throughput = p.stats.Throughput

	// Worker utilization = fraction of pool workers whose loop is running.
	activeWorkers := 0
	for _, worker := range p.workerPool {
		if worker.active.Load() {
			activeWorkers++
		}
	}
	if len(p.workerPool) > 0 {
		stats.WorkerUtilization = float64(activeWorkers) / float64(len(p.workerPool))
	}

	return stats
}

// SetMaxConcurrency sets the maximum concurrency level. Returns an error for
// non-positive values.
//
// NOTE(review): this only updates the configured ceiling; it does not resize
// the already-started worker pool — confirm whether that is intended.
func (p *SmartConcurrentProcessor) SetMaxConcurrency(maxConcurrency int) error {
	if maxConcurrency <= 0 {
		return fmt.Errorf("max concurrency must be positive")
	}

	p.mu.Lock()
	p.maxConcurrency = maxConcurrency
	p.mu.Unlock()
	return nil
}

// Start runs the worker loop: it marks the worker active for its lifetime and
// drains the task queue until the queue is closed or ctx is cancelled.
func (w *ConcurrentWorker) Start(ctx context.Context) {
	w.active.Store(true)
	defer w.active.Store(false)

	done := ctx.Done()
	for {
		select {
		case <-done:
			return
		case task, more := <-w.taskQueue:
			if !more {
				// Task queue closed by Stop — nothing left to do.
				return
			}
			w.processTask(ctx, task)
		}
	}
}

// processTask dispatches a task to the handler for its type, then sends the
// result back on the shared result queue.
//
// Fix: an unrecognized task type previously fell through the switch and sent
// a result whose Data was nil, which would panic the collectors' type
// assertions. It is now counted as failed and dropped (collectors time out on
// the missing result rather than panicking).
func (w *ConcurrentWorker) processTask(ctx context.Context, task ConcurrentTask) {
	startTime := time.Now()

	var result interface{}

	switch task.Type {
	case "file":
		result = w.processFileTask(ctx, task.Data.(FileTask))
	case "block":
		result = w.processBlockTask(ctx, task.Data.(BlockTask))
	case "snapshot":
		result = w.processSnapshotTask(ctx, task.Data.(SnapshotTask))
	default:
		fmt.Printf("Dropping task %s with unknown type %q\n", task.ID, task.Type)
		w.stats.TasksFailed++
		w.stats.LastUpdate = time.Now()
		atomic.AddInt64(&w.processor.stats.FailedTasks, 1)
		return
	}

	resultData := ConcurrentTaskResult{
		TaskID:   task.ID,
		Data:     result,
		Duration: time.Since(startTime),
	}

	// Send the result with a timeout so a full queue never wedges the worker.
	select {
	case w.resultQueue <- resultData:
		// Result sent successfully
	case <-time.After(1 * time.Second):
		// Timeout sending result; log and attempt one non-blocking retry.
		fmt.Printf("Timeout sending result for task %s, queue might be full\n", task.ID)
		select {
		case w.resultQueue <- resultData:
		default:
			// Still full: drop the result but keep the worker alive.
			fmt.Printf("Dropped result for task %s due to full queue\n", task.ID)
		}
	}

	// Per-worker stats are only written from this worker's goroutine.
	w.stats.TasksProcessed++
	w.stats.LastUpdate = time.Now()

	// Shared processor counter is read concurrently; update atomically.
	atomic.AddInt64(&w.processor.stats.ProcessedTasks, 1)
}

// processFileTask simulates processing of one file task and returns its
// result. A cancelled context yields a result carrying ctx.Err().
func (w *ConcurrentWorker) processFileTask(ctx context.Context, task FileTask) ConcurrentProcessingResult {
	// Bail out early on cancellation; ctx.Err() is non-nil iff Done is closed.
	if err := ctx.Err(); err != nil {
		return ConcurrentProcessingResult{
			TaskID: task.ID,
			Error:  err,
		}
	}

	// Simulated processing: fixed 10ms duration and synthetic metrics.
	dataLen := int64(len(task.Data))
	return ConcurrentProcessingResult{
		TaskID:   task.ID,
		Size:     dataLen,
		Duration: 10 * time.Millisecond,
		Metrics: ProcessingMetrics{
			CPUUsage:    0.5,
			MemoryUsage: 1024 * 1024,             // 1MB
			Throughput:  float64(dataLen) / 0.01, // bytes over the 10ms window
		},
	}
}

// processBlockTask simulates processing of one block task and returns its
// result. A cancelled context yields a result carrying ctx.Err().
func (w *ConcurrentWorker) processBlockTask(ctx context.Context, task BlockTask) BlockResult {
	// Bail out early on cancellation; ctx.Err() is non-nil iff Done is closed.
	if err := ctx.Err(); err != nil {
		return BlockResult{
			TaskID: task.ID,
			Error:  err,
		}
	}

	// Simulated processing: fixed 1ms duration, block always "stored" and
	// never pre-existing (Exists stays at its false zero value).
	res := BlockResult{
		TaskID:    task.ID,
		BlockHash: task.BlockHash,
		Stored:    true,
		Size:      task.Size,
		Duration:  1 * time.Millisecond,
	}
	res.Metrics = BlockMetrics{
		CPUUsage:    0.1,
		MemoryUsage: 64 * 1024,                  // 64KB
		Throughput:  float64(task.Size) / 0.001, // bytes over the 1ms window
	}
	return res
}

// processSnapshotTask simulates processing of one snapshot task and returns
// its result. A cancelled context yields a result carrying ctx.Err().
func (w *ConcurrentWorker) processSnapshotTask(ctx context.Context, task SnapshotTask) SnapshotResult {
	// Bail out early on cancellation; ctx.Err() is non-nil iff Done is closed.
	if err := ctx.Err(); err != nil {
		return SnapshotResult{
			TaskID: task.ID,
			Error:  err,
		}
	}

	// Simulated processing: fixed synthetic snapshot of 100 files / 100MB
	// with a 30% dedup ratio, taking 100ms.
	const totalBytes = 1024 * 1024 * 100
	res := SnapshotResult{
		TaskID:     task.ID,
		SnapshotID: task.SnapshotID,
		FileCount:  100,
		TotalSize:  totalBytes,
		DedupRatio: 0.3,
		Duration:   100 * time.Millisecond,
	}
	res.Metrics = SnapshotMetrics{
		CPUUsage:    0.8,
		MemoryUsage: 10 * 1024 * 1024,         // 10MB
		Throughput:  float64(totalBytes) / 0.1, // bytes over the 100ms window
	}
	return res
}

// NewLoadBalancer creates a new load balancer using the given selection
// algorithm ("round_robin", "least_loaded" or "performance_based"); any
// other value falls back to picking the first worker.
func NewLoadBalancer(algorithm string) *LoadBalancer {
	lb := &LoadBalancer{algorithm: algorithm}
	lb.workers = make([]*ConcurrentWorker, 0)
	return lb
}

// AddWorker registers a worker with the load balancer. Safe for concurrent
// use.
func (lb *LoadBalancer) AddWorker(worker *ConcurrentWorker) {
	lb.mu.Lock()
	lb.workers = append(lb.workers, worker)
	lb.mu.Unlock()
}

// SelectWorker selects a worker according to the configured algorithm, or
// nil when no workers are registered.
//
// Fix: the round-robin branch mutates lastWorkerIndex, but the method
// previously held only the read lock — concurrent callers raced on the
// cursor. A write lock is taken for the whole selection instead.
func (lb *LoadBalancer) SelectWorker() *ConcurrentWorker {
	lb.mu.Lock()
	defer lb.mu.Unlock()

	if len(lb.workers) == 0 {
		return nil
	}

	switch lb.algorithm {
	case "round_robin":
		worker := lb.workers[lb.lastWorkerIndex]
		lb.lastWorkerIndex = (lb.lastWorkerIndex + 1) % len(lb.workers)
		return worker
	case "least_loaded":
		return lb.selectLeastLoadedWorker()
	case "performance_based":
		return lb.selectBestPerformanceWorker()
	default:
		return lb.workers[0]
	}
}

// selectLeastLoadedWorker returns the worker with the fewest processed tasks
// (first among ties). Caller must hold lb.mu.
// NOTE(review): worker.stats is written by the worker goroutine without
// synchronization, so this read may be racy — confirm intended.
func (lb *LoadBalancer) selectLeastLoadedWorker() *ConcurrentWorker {
	var candidate *ConcurrentWorker
	for _, w := range lb.workers {
		if candidate == nil || w.stats.TasksProcessed < candidate.stats.TasksProcessed {
			candidate = w
		}
	}
	return candidate
}

// selectBestPerformanceWorker returns the worker with the highest
// performanceScore, or nil when every score is at or below the -1 sentinel.
// Caller must hold lb.mu.
func (lb *LoadBalancer) selectBestPerformanceWorker() *ConcurrentWorker {
	best := -1.0
	var chosen *ConcurrentWorker
	for _, w := range lb.workers {
		if s := w.performanceScore; s > best {
			best = s
			chosen = w
		}
	}
	return chosen
}

// NewBackpressureManager creates a backpressure manager with the given
// enabled flag and pressure threshold, and a default 100ms reaction time.
func NewBackpressureManager(enabled bool, threshold int) *BackpressureManager {
	bm := &BackpressureManager{
		enabled:   enabled,
		threshold: threshold,
	}
	bm.reactionTime = 100 * time.Millisecond
	return bm
}

// ShouldThrottle reports whether submitters should back off: true when the
// manager is enabled and the atomically-read pressure exceeds the threshold.
func (bm *BackpressureManager) ShouldThrottle() bool {
	if !bm.enabled {
		return false
	}
	return atomic.LoadInt32(&bm.currentPressure) > int32(bm.threshold)
}

// UpdatePressure adjusts the current pressure level by delta (positive to
// increase, negative to relieve). Safe for concurrent use — the counter is
// updated atomically and read the same way by ShouldThrottle.
func (bm *BackpressureManager) UpdatePressure(delta int32) {
	atomic.AddInt32(&bm.currentPressure, delta)
}

// NewAdaptiveController creates an adaptive controller with the given
// targets and scaling bounds. The performance window is pre-sized for the
// 100-sample cap maintained by adjustConcurrency.
func NewAdaptiveController(enabled bool, targetLatency time.Duration, targetThroughput float64, minWorkers, maxWorkers int, adjustmentRate float64) *AdaptiveController {
	ac := &AdaptiveController{
		enabled:          enabled,
		targetLatency:    targetLatency,
		targetThroughput: targetThroughput,
		minWorkers:       minWorkers,
		maxWorkers:       maxWorkers,
		adjustmentRate:   adjustmentRate,
	}
	ac.performanceWindow = make([]PerformanceSample, 0, 100)
	return ac
}

// Start runs the adaptive control loop, adjusting the processor's
// concurrency once per second until ctx is cancelled. Returns immediately
// when the controller is disabled.
func (ac *AdaptiveController) Start(ctx context.Context, processor *SmartConcurrentProcessor) {
	if !ac.enabled {
		return
	}

	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	done := ctx.Done()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			ac.adjustConcurrency(processor)
		}
	}
}

// adjustConcurrency samples the processor's performance, appends the sample
// to the sliding window (capped at 100), and scales the configured max
// concurrency by one worker toward the target throughput.
//
// Fix: the stats counters (ScaleUps/ScaleDowns/Adjustments) were previously
// incremented without holding ac.mu even though the struct carries a mutex —
// a data race with any concurrent reader. All shared-state mutation now
// happens under the lock.
func (ac *AdaptiveController) adjustConcurrency(processor *SmartConcurrentProcessor) {
	// Get current performance metrics.
	stats := processor.GetConcurrencyStats()

	sample := PerformanceSample{
		Timestamp:   time.Now(),
		Latency:     stats.AverageLatency,
		Throughput:  stats.Throughput,
		WorkerCount: stats.ActiveWorkers,
	}

	// Decide the direction before touching shared state.
	scaleUp := stats.Throughput < ac.targetThroughput && stats.ActiveWorkers < int32(ac.maxWorkers)
	scaleDown := !scaleUp && stats.Throughput > ac.targetThroughput && stats.ActiveWorkers > int32(ac.minWorkers)

	// Apply the adjustment outside ac.mu to avoid holding two locks at once.
	// SetMaxConcurrency only errors on non-positive values, which the
	// minWorkers guard above rules out, so the error is safely ignorable.
	if scaleUp {
		_ = processor.SetMaxConcurrency(int(stats.ActiveWorkers) + 1)
	} else if scaleDown {
		_ = processor.SetMaxConcurrency(int(stats.ActiveWorkers) - 1)
	}

	ac.mu.Lock()
	ac.performanceWindow = append(ac.performanceWindow, sample)
	if len(ac.performanceWindow) > 100 {
		ac.performanceWindow = ac.performanceWindow[1:]
	}
	if scaleUp {
		ac.stats.ScaleUps++
	} else if scaleDown {
		ac.stats.ScaleDowns++
	}
	ac.stats.Adjustments++
	ac.mu.Unlock()
}

// NewMetricsCollector creates a metrics collector that retains up to
// maxSamples observations and prunes anything older than analysisWindow.
func NewMetricsCollector(maxSamples int, analysisWindow time.Duration) *MetricsCollector {
	mc := &MetricsCollector{
		maxSamples:     maxSamples,
		analysisWindow: analysisWindow,
	}
	mc.metrics = make([]MetricSample, 0, maxSamples)
	return mc
}

// Start runs the metrics analysis loop, pruning and analyzing samples every
// 10 seconds until ctx is cancelled.
func (mc *MetricsCollector) Start(ctx context.Context) {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()

	done := ctx.Done()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			mc.analyzeMetrics()
		}
	}
}

// analyzeMetrics prunes samples older than the analysis window (counting
// each drop) and bumps the analysis-run counter. Holds mc.mu throughout.
func (mc *MetricsCollector) analyzeMetrics() {
	mc.mu.Lock()
	defer mc.mu.Unlock()

	// In-place filter: reuse the backing array, keep only fresh samples.
	cutoff := time.Now().Add(-mc.analysisWindow)
	kept := mc.metrics[:0]
	for _, sample := range mc.metrics {
		if !sample.Timestamp.After(cutoff) {
			mc.stats.SamplesDropped++
			continue
		}
		kept = append(kept, sample)
	}
	mc.metrics = kept

	mc.stats.AnalysisRuns++
}

// ConcurrentTask represents a concurrent task on the shared worker queue.
type ConcurrentTask struct {
	ID       string // task identifier, echoed into ConcurrentTaskResult.TaskID
	Type     string // "file", "block" or "snapshot"; selects the worker handler
	Data     interface{} // the FileTask, BlockTask or SnapshotTask matching Type
	Priority int // scheduling hint — NOTE(review): not consulted by the current worker loop
	Timeout  time.Duration // per-task budget — NOTE(review): not enforced by the current workers
}

// ConcurrentTaskResult represents a concurrent task result delivered on the
// shared result queue.
type ConcurrentTaskResult struct {
	TaskID   string // ID of the originating task
	Data     interface{} // ConcurrentProcessingResult, BlockResult or SnapshotResult
	Duration time.Duration // wall time the worker spent on the task
	Error    error // NOTE(review): never populated by the current workers; per-task errors live inside Data
}