// Package benchmark provides performance benchmarking utilities
package benchmark

import (
	"context"
	"fmt"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/smart-snapshotter/internal/logging"
	"github.com/smart-snapshotter/internal/metrics"
)

// BenchmarkType represents the category of benchmark being executed; it is
// used as a metrics/report label.
type BenchmarkType string

// Supported benchmark types.
const (
	TypeLatency       BenchmarkType = "latency"
	TypeThroughput    BenchmarkType = "throughput"
	TypeMemory        BenchmarkType = "memory"
	TypeCPU           BenchmarkType = "cpu"
	TypeStorage       BenchmarkType = "storage"
	TypeNetwork       BenchmarkType = "network"
	TypeDeduplication BenchmarkType = "deduplication"
	TypeCache         BenchmarkType = "cache"
)

// BenchmarkResult represents the result of a benchmark run. It is populated
// by Benchmarker.Run / RunConcurrent and consumed by Compare and
// GenerateReport.
type BenchmarkResult struct {
	Name         string        // benchmark name copied from the config
	Type         BenchmarkType // benchmark category copied from the config
	Iterations   int           // number of timed iterations requested
	TotalTime    time.Duration // wall-clock time for the whole timed loop
	AverageTime  time.Duration // TotalTime / Iterations
	MinTime      time.Duration // fastest single iteration observed
	MaxTime      time.Duration // slowest single iteration observed
	Throughput   float64 // operations per second
	MemoryUsage  uint64  // bytes (TotalAlloc delta; only set when RecordMemory)
	CPUUsage     float64 // percentage; NOTE(review): never written in this file — confirm producer
	Errors       int     // count of iterations whose fn returned a non-nil error
	ErrorRate    float64 // Errors / Iterations
	Percentiles  map[int]time.Duration // 50th, 90th, 95th, 99th percentiles
	Metadata     map[string]interface{} // free-form extra data; not populated here
}

// BenchmarkConfig provides configuration for benchmarks.
type BenchmarkConfig struct {
	Name             string        // benchmark name used in logs, metrics, and reports
	Type             BenchmarkType // benchmark category
	Iterations       int           // number of timed iterations to execute
	Duration         time.Duration // target duration for duration-driven helpers
	Concurrency      int           // worker count for RunConcurrent (<=0 defaults to NumCPU)
	WarmupIterations int           // untimed iterations executed before measuring
	RecordMemory     bool          // capture the allocation delta via runtime.MemStats
	RecordCPU        bool          // NOTE(review): not consumed by Run/RunConcurrent in this file — confirm intent
	Percentiles      []int         // percentiles to compute, e.g. [50, 90, 95, 99]
}

// Benchmarker provides benchmark execution, comparison, and reporting
// capabilities.
type Benchmarker interface {
	// Run executes a benchmark sequentially, invoking fn once per iteration.
	Run(ctx context.Context, config BenchmarkConfig, fn func() error) (*BenchmarkResult, error)

	// RunConcurrent executes a concurrent benchmark across
	// config.Concurrency workers; fn receives the worker's ID.
	RunConcurrent(ctx context.Context, config BenchmarkConfig, fn func(int) error) (*BenchmarkResult, error)

	// Compare compares multiple benchmark results; the first entry is the
	// baseline.
	Compare(results []*BenchmarkResult) *BenchmarkComparison

	// GenerateReport generates a human-readable benchmark report.
	GenerateReport(results []*BenchmarkResult) string
}

// benchmarker implements Benchmarker. It logs progress through a
// component-scoped logger and publishes outcomes via recordMetrics.
type benchmarker struct {
	logger  logging.Logger           // component-scoped structured logger
	metrics metrics.MetricsCollector // NOTE(review): recordMetrics uses package-level metrics helpers, not this field — confirm
}

// NewBenchmarker constructs a Benchmarker wired to the package's logging
// component and the global metrics collector.
func NewBenchmarker() Benchmarker {
	bm := &benchmarker{}
	bm.logger = logging.WithComponent("benchmark")
	bm.metrics = metrics.GetMetrics()
	return bm
}

// Run executes a benchmark sequentially: after an optional warmup phase it
// times config.Iterations calls of fn and aggregates total/average/min/max
// latency, throughput, error rate, the requested percentiles, and (when
// config.RecordMemory is set) the allocation delta across the run. It
// returns an error if ctx is cancelled between iterations or if the config
// requests no iterations.
func (b *benchmarker) Run(ctx context.Context, config BenchmarkConfig, fn func() error) (*BenchmarkResult, error) {
	b.logger.WithFields(
		logging.StringField("name", config.Name),
		logging.StringField("type", string(config.Type)),
		logging.IntField("iterations", config.Iterations),
		logging.DurationField("duration", config.Duration),
	).Info("Starting benchmark")

	// Guard against a zero-iteration config, which previously caused a
	// division-by-zero panic when computing the average time below.
	if config.Iterations <= 0 {
		return nil, fmt.Errorf("benchmark %q: iterations must be positive, got %d", config.Name, config.Iterations)
	}

	// Warmup iterations are untimed; failures are logged but not counted.
	if config.WarmupIterations > 0 {
		b.logger.Info("Running warmup iterations")
		for i := 0; i < config.WarmupIterations; i++ {
			if err := fn(); err != nil {
				b.logger.WithFields(
					logging.StringField("error", err.Error()),
					logging.IntField("warmup_iteration", i),
				).Warn("Warmup iteration failed")
			}
		}
	}

	// Force a GC so warmup garbage does not pollute the memory measurement.
	runtime.GC()
	var m1 runtime.MemStats
	runtime.ReadMemStats(&m1)

	result := &BenchmarkResult{
		Name:        config.Name,
		Type:        config.Type,
		Iterations:  config.Iterations,
		Percentiles: make(map[int]time.Duration),
		Metadata:    make(map[string]interface{}),
	}

	times := make([]time.Duration, 0, config.Iterations)
	start := time.Now()

	for i := 0; i < config.Iterations; i++ {
		// Abort promptly if the caller cancelled the context.
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("benchmark cancelled: %w", ctx.Err())
		default:
		}

		iterStart := time.Now()
		err := fn()
		iterDuration := time.Since(iterStart)

		if err != nil {
			result.Errors++
			b.logger.WithFields(
				logging.StringField("error", err.Error()),
				logging.IntField("iteration", i),
			).Warn("Benchmark iteration failed")
		}

		times = append(times, iterDuration)

		// Seed min/max from the first sample rather than a time.Hour
		// sentinel, which misreported MinTime for iterations over an hour.
		if i == 0 || iterDuration < result.MinTime {
			result.MinTime = iterDuration
		}
		if iterDuration > result.MaxTime {
			result.MaxTime = iterDuration
		}
	}

	// TotalTime is the wall-clock time of the whole loop. (The previous
	// per-iteration accumulation was dead work: it was overwritten here.)
	result.TotalTime = time.Since(start)
	result.AverageTime = result.TotalTime / time.Duration(config.Iterations)
	result.Throughput = float64(config.Iterations) / result.TotalTime.Seconds()
	result.ErrorRate = float64(result.Errors) / float64(config.Iterations)

	// Calculate percentiles over the individual iteration durations.
	if len(config.Percentiles) > 0 {
		result.Percentiles = calculatePercentiles(times, config.Percentiles)
	}

	// Allocation delta across the benchmark (TotalAlloc is monotonic).
	if config.RecordMemory {
		var m2 runtime.MemStats
		runtime.ReadMemStats(&m2)
		result.MemoryUsage = m2.TotalAlloc - m1.TotalAlloc
	}

	// Publish the outcome to the metrics system.
	b.recordMetrics(config, result)

	b.logger.WithFields(
		logging.StringField("name", result.Name),
		logging.DurationField("total_time", result.TotalTime),
		logging.DurationField("average_time", result.AverageTime),
		logging.StringField("throughput", fmt.Sprintf("%.2f", result.Throughput)),
		logging.IntField("errors", result.Errors),
		logging.StringField("error_rate", fmt.Sprintf("%.2f%%", result.ErrorRate*100)),
	).Info("Benchmark completed")

	return result, nil
}

// RunConcurrent executes a benchmark using config.Concurrency worker
// goroutines that pull iteration numbers from a shared work queue and
// invoke fn with their worker ID. Results are aggregated exactly as in
// Run. The run aborts with an error when ctx is cancelled or when the
// config requests no iterations.
func (b *benchmarker) RunConcurrent(ctx context.Context, config BenchmarkConfig, fn func(int) error) (*BenchmarkResult, error) {
	b.logger.WithFields(
		logging.StringField("name", config.Name),
		logging.StringField("type", string(config.Type)),
		logging.IntField("iterations", config.Iterations),
		logging.IntField("concurrency", config.Concurrency),
	).Info("Starting concurrent benchmark")

	// Guard against a zero-iteration config, which previously caused a
	// division-by-zero panic when computing the average time below.
	if config.Iterations <= 0 {
		return nil, fmt.Errorf("benchmark %q: iterations must be positive, got %d", config.Name, config.Iterations)
	}

	if config.Concurrency <= 0 {
		config.Concurrency = runtime.NumCPU()
	}

	// Warmup: the warmup iterations are split evenly across workers.
	// (When WarmupIterations < Concurrency the integer division yields
	// zero warmup calls per worker.)
	if config.WarmupIterations > 0 {
		b.logger.Info("Running warmup iterations")
		var warmupWG sync.WaitGroup
		for i := 0; i < config.Concurrency; i++ {
			warmupWG.Add(1)
			go func(workerID int) {
				defer warmupWG.Done()
				for j := 0; j < config.WarmupIterations/config.Concurrency; j++ {
					if err := fn(workerID); err != nil {
						b.logger.WithFields(
							logging.StringField("error", err.Error()),
							logging.IntField("worker", workerID),
						).Warn("Warmup iteration failed")
					}
				}
			}(i)
		}
		warmupWG.Wait()
	}

	// Force a GC so warmup garbage does not pollute the memory measurement.
	runtime.GC()
	var m1 runtime.MemStats
	runtime.ReadMemStats(&m1)

	result := &BenchmarkResult{
		Name:        config.Name,
		Type:        config.Type,
		Iterations:  config.Iterations,
		Percentiles: make(map[int]time.Duration),
		Metadata:    make(map[string]interface{}),
	}

	type iterResult struct {
		duration time.Duration
		err      error
	}

	// resultChan is buffered to the full iteration count so a worker's
	// send can never block, even if the collector bails out early.
	workChan := make(chan int, config.Iterations)
	resultChan := make(chan iterResult, config.Iterations)

	// Start the worker pool.
	var wg sync.WaitGroup
	for i := 0; i < config.Concurrency; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()
			for range workChan {
				select {
				case <-ctx.Done():
					return
				default:
				}

				iterStart := time.Now()
				err := fn(workerID)
				resultChan <- iterResult{duration: time.Since(iterStart), err: err}
			}
		}(i)
	}

	start := time.Now()

	// Feed work to the workers; stop early on cancellation. Closing
	// workChan (via defer) releases any workers still ranging over it.
	go func() {
		defer close(workChan)
		for iteration := 0; iteration < config.Iterations; iteration++ {
			select {
			case <-ctx.Done():
				return
			case workChan <- iteration:
			}
		}
	}()

	// Collect results. Selecting on ctx.Done here is essential: cancelled
	// workers abandon their remaining work items, so fewer than
	// config.Iterations results may ever arrive — the previous
	// unconditional receive deadlocked on cancellation.
	times := make([]time.Duration, 0, config.Iterations)
	for i := 0; i < config.Iterations; i++ {
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("benchmark cancelled: %w", ctx.Err())
		case res := <-resultChan:
			if res.err != nil {
				result.Errors++
			}
			times = append(times, res.duration)

			// Seed min/max from the first sample rather than a time.Hour
			// sentinel, which misreported MinTime for long iterations.
			if len(times) == 1 || res.duration < result.MinTime {
				result.MinTime = res.duration
			}
			if res.duration > result.MaxTime {
				result.MaxTime = res.duration
			}
		}
	}

	wg.Wait()
	close(resultChan)

	// TotalTime is the wall-clock time of the whole run. (The previous
	// per-result accumulation was dead work: it was overwritten here.)
	result.TotalTime = time.Since(start)
	result.AverageTime = result.TotalTime / time.Duration(config.Iterations)
	result.Throughput = float64(config.Iterations) / result.TotalTime.Seconds()
	result.ErrorRate = float64(result.Errors) / float64(config.Iterations)

	// Calculate percentiles over the individual iteration durations.
	if len(config.Percentiles) > 0 {
		result.Percentiles = calculatePercentiles(times, config.Percentiles)
	}

	// Allocation delta across the benchmark (TotalAlloc is monotonic).
	if config.RecordMemory {
		var m2 runtime.MemStats
		runtime.ReadMemStats(&m2)
		result.MemoryUsage = m2.TotalAlloc - m1.TotalAlloc
	}

	// Publish the outcome to the metrics system.
	b.recordMetrics(config, result)

	b.logger.WithFields(
		logging.StringField("name", result.Name),
		logging.IntField("concurrency", config.Concurrency),
		logging.DurationField("total_time", result.TotalTime),
		logging.DurationField("average_time", result.AverageTime),
		logging.StringField("throughput", fmt.Sprintf("%.2f", result.Throughput)),
		logging.IntField("errors", result.Errors),
		logging.StringField("error_rate", fmt.Sprintf("%.2f%%", result.ErrorRate*100)),
	).Info("Concurrent benchmark completed")

	return result, nil
}

// BenchmarkComparison represents a comparison of benchmark results against
// a baseline (the first result passed to Compare).
type BenchmarkComparison struct {
	Baseline    *BenchmarkResult            // the reference result
	Comparisons []BenchmarkComparisonResult // one entry per non-baseline result, in input order
}

// BenchmarkComparisonResult represents a single result compared against
// the baseline. All improvements are percentages where positive means the
// compared result is better than the baseline.
type BenchmarkComparisonResult struct {
	Result                *BenchmarkResult
	LatencyImprovement    float64 // percentage improvement (negative means regression)
	ThroughputImprovement float64 // percentage improvement
	MemoryImprovement     float64 // percentage improvement
}

// Compare compares multiple benchmark results, treating the first entry as
// the baseline. It returns nil when results is empty. Improvement values
// are percentages: positive means the compared run was better than the
// baseline (lower latency/memory, higher throughput).
func (b *benchmarker) Compare(results []*BenchmarkResult) *BenchmarkComparison {
	if len(results) == 0 {
		return nil
	}

	baseline := results[0]
	comparison := &BenchmarkComparison{
		Baseline:    baseline,
		Comparisons: make([]BenchmarkComparisonResult, 0, len(results)-1),
	}

	for _, result := range results[1:] {
		compResult := BenchmarkComparisonResult{
			Result: result,
		}

		// Guard each ratio against a zero baseline to avoid NaN/Inf.
		if baseline.AverageTime > 0 {
			compResult.LatencyImprovement = float64(baseline.AverageTime-result.AverageTime) / float64(baseline.AverageTime) * 100
		}
		if baseline.Throughput > 0 {
			compResult.ThroughputImprovement = (result.Throughput - baseline.Throughput) / baseline.Throughput * 100
		}
		if baseline.MemoryUsage > 0 {
			// Convert BEFORE subtracting: MemoryUsage is uint64, and
			// baseline-result wraps around to a huge positive value whenever
			// the compared run used more memory than the baseline, turning a
			// regression into an enormous "improvement".
			compResult.MemoryImprovement = (float64(baseline.MemoryUsage) - float64(result.MemoryUsage)) / float64(baseline.MemoryUsage) * 100
		}

		comparison.Comparisons = append(comparison.Comparisons, compResult)
	}

	return comparison
}

// GenerateReport renders the results as a human-readable text report.
// Percentile lines are emitted in ascending order so the output is
// deterministic, and when more than one result is present a comparison
// section against the first (baseline) result is appended.
func (b *benchmarker) GenerateReport(results []*BenchmarkResult) string {
	if len(results) == 0 {
		return "No benchmark results to report"
	}

	// Build with strings.Builder instead of repeated string concatenation,
	// which was quadratic in the size of the report.
	var sb strings.Builder
	sb.WriteString("Benchmark Report\n")
	sb.WriteString("==================\n\n")

	for i, result := range results {
		fmt.Fprintf(&sb, "Test %d: %s (%s)\n", i+1, result.Name, result.Type)
		fmt.Fprintf(&sb, "  Iterations: %d\n", result.Iterations)
		fmt.Fprintf(&sb, "  Total Time: %v\n", result.TotalTime)
		fmt.Fprintf(&sb, "  Average Time: %v\n", result.AverageTime)
		fmt.Fprintf(&sb, "  Min Time: %v\n", result.MinTime)
		fmt.Fprintf(&sb, "  Max Time: %v\n", result.MaxTime)
		fmt.Fprintf(&sb, "  Throughput: %.2f ops/sec\n", result.Throughput)
		fmt.Fprintf(&sb, "  Error Rate: %.2f%%\n", result.ErrorRate*100)

		if result.MemoryUsage > 0 {
			fmt.Fprintf(&sb, "  Memory Usage: %d bytes\n", result.MemoryUsage)
		}

		if len(result.Percentiles) > 0 {
			sb.WriteString("  Percentiles:\n")
			// Map iteration order is randomized in Go; sort the keys so the
			// report is stable between runs.
			keys := make([]int, 0, len(result.Percentiles))
			for p := range result.Percentiles {
				keys = append(keys, p)
			}
			sort.Ints(keys)
			for _, p := range keys {
				fmt.Fprintf(&sb, "    P%d: %v\n", p, result.Percentiles[p])
			}
		}

		sb.WriteString("\n")
	}

	// Append a pairwise comparison section when there is more than one result.
	if len(results) > 1 {
		comparison := b.Compare(results)
		sb.WriteString("Performance Comparison\n")
		sb.WriteString("======================\n\n")

		for i, comp := range comparison.Comparisons {
			fmt.Fprintf(&sb, "Comparison %d: %s vs %s\n", i+1, comparison.Baseline.Name, comp.Result.Name)
			fmt.Fprintf(&sb, "  Latency Improvement: %.2f%%\n", comp.LatencyImprovement)
			fmt.Fprintf(&sb, "  Throughput Improvement: %.2f%%\n", comp.ThroughputImprovement)
			fmt.Fprintf(&sb, "  Memory Improvement: %.2f%%\n", comp.MemoryImprovement)
			sb.WriteString("\n")
		}
	}

	return sb.String()
}

// recordMetrics publishes the benchmark outcome to the metrics system:
// average-duration and throughput histograms, success/error counters, an
// optional memory-usage gauge, and one histogram sample per computed
// percentile. Labels are (name, type), plus a status or percentile label
// where applicable.
//
// NOTE(review): this uses package-level metrics helpers rather than the
// b.metrics collector field — confirm that is intentional.
func (b *benchmarker) recordMetrics(config BenchmarkConfig, result *BenchmarkResult) {
	// Record operation metrics: average latency and ops/sec.
	metrics.RecordHistogram("benchmark_duration_seconds", result.AverageTime.Seconds(),
		config.Name, string(config.Type))

	metrics.RecordHistogram("benchmark_throughput_ops", result.Throughput,
		config.Name, string(config.Type))

	// Counts the run once as "success"; errored iterations are added below.
	metrics.IncrementCounter("benchmark_operations_total",
		config.Name, string(config.Type), "success")

	if result.Errors > 0 {
		metrics.AddCounter("benchmark_operations_total", float64(result.Errors),
			config.Name, string(config.Type), "error")
	}

	// Record memory usage (only populated when RecordMemory was set).
	if result.MemoryUsage > 0 {
		metrics.SetGauge("benchmark_memory_usage_bytes", float64(result.MemoryUsage),
			config.Name, string(config.Type))
	}

	// Record percentiles, each labelled e.g. "p50", "p99".
	for percentile, duration := range result.Percentiles {
		metrics.RecordHistogram("benchmark_percentile_duration_seconds", duration.Seconds(),
			config.Name, string(config.Type), fmt.Sprintf("p%d", percentile))
	}
}

// calculatePercentiles calculates percentiles from a list of durations
func calculatePercentiles(durations []time.Duration, percentiles []int) map[int]time.Duration {
	if len(durations) == 0 || len(percentiles) == 0 {
		return make(map[int]time.Duration)
	}

	// Sort durations
	sorted := make([]time.Duration, len(durations))
	copy(sorted, durations)
	for i := 0; i < len(sorted); i++ {
		for j := i + 1; j < len(sorted); j++ {
			if sorted[i] > sorted[j] {
				sorted[i], sorted[j] = sorted[j], sorted[i]
			}
		}
	}

	result := make(map[int]time.Duration)
	for _, percentile := range percentiles {
		index := (len(sorted) * percentile) / 100
		if index >= len(sorted) {
			index = len(sorted) - 1
		}
		result[percentile] = sorted[index]
	}

	return result
}

// Convenience functions for common benchmarks

// BenchmarkLatency benchmarks the latency of fn over the given number of
// iterations, recording the 50th/90th/95th/99th percentiles.
func BenchmarkLatency(ctx context.Context, name string, iterations int, fn func() error) (*BenchmarkResult, error) {
	cfg := BenchmarkConfig{
		Name:        name,
		Type:        TypeLatency,
		Iterations:  iterations,
		Percentiles: []int{50, 90, 95, 99},
	}
	return NewBenchmarker().Run(ctx, cfg, fn)
}

// BenchmarkThroughput benchmarks operation throughput for roughly the given
// duration. Because the underlying Run is iteration-based, the iteration
// count is estimated from a single timed calibration call of fn (previously
// Iterations was left at zero, so Run did no work and then panicked with a
// division by zero). The calibration call executes fn once outside the
// measured run, just like a warmup iteration would.
func BenchmarkThroughput(ctx context.Context, name string, duration time.Duration, fn func() error) (*BenchmarkResult, error) {
	config := BenchmarkConfig{
		Name:        name,
		Type:        TypeThroughput,
		Duration:    duration,
		Percentiles: []int{50, 90, 95, 99},
	}

	// Calculate iterations based on duration via one calibration call.
	const maxIterations = 1_000_000 // cap so a near-zero sample can't explode the run
	config.Iterations = 1
	calStart := time.Now()
	calErr := fn()
	if elapsed := time.Since(calStart); calErr == nil && elapsed > 0 {
		if iters := int(duration / elapsed); iters > 1 {
			if iters > maxIterations {
				iters = maxIterations
			}
			config.Iterations = iters
		}
	}

	benchmarker := NewBenchmarker()
	return benchmarker.Run(ctx, config, fn)
}

// BenchmarkMemory measures the allocation footprint of fn across the given
// number of iterations, also recording latency percentiles.
func BenchmarkMemory(ctx context.Context, name string, iterations int, fn func() error) (*BenchmarkResult, error) {
	cfg := BenchmarkConfig{
		Name:         name,
		Type:         TypeMemory,
		Iterations:   iterations,
		RecordMemory: true,
		Percentiles:  []int{50, 90, 95, 99},
	}
	return NewBenchmarker().Run(ctx, cfg, fn)
}