// Package storage provides performance optimization utilities for storage backends
package storage

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"syscall"
	"time"
)

// PerformanceOptimizer handles storage performance optimizations.
//
// It layers an optional in-memory file cache, an optional access-count
// prefetcher, and a concurrency-limiting IO scheduler over plain file IO,
// while accumulating aggregate read/write/cache statistics.
type PerformanceOptimizer struct {
	config           PerformanceConfig // fixed at construction time
	cache            *FileCache        // nil when config.EnableCache is false
	prefetcher       *Prefetcher       // nil when config.EnablePrefetch is false
	ioScheduler      *IOScheduler      // always non-nil; bounds concurrent IO
	stats            PerformanceStats  // guarded by mu
	mu               sync.RWMutex      // protects stats
}

// PerformanceConfig represents performance optimization configuration.
type PerformanceConfig struct {
	EnableCache        bool          // enable the in-memory file cache
	CacheSize          int64         // cache budget in bytes
	CacheExpiry        time.Duration // idle time after which cache entries expire
	EnablePrefetch     bool          // enable access-count prefetching
	PrefetchThreshold  int           // accesses before a prefetch triggers
	MaxConcurrentIO    int           // max IO operations in flight at once
	ReadAheadSize      int64         // read chunk size in bytes
	WriteBufferSize    int           // write chunk size in bytes
	CompressionLevel   int           // compression level (low = fast)
	EnableDirectIO     bool          // open files with O_DIRECT
	EnableAsyncIO      bool          // enable asynchronous IO
}

// DefaultPerformanceConfig returns the default performance configuration:
// a 100MB cache with 5-minute expiry, prefetching after 3 accesses, IO
// concurrency scaled to CPU count, and fast compression.
func DefaultPerformanceConfig() PerformanceConfig {
	const mib = 1024 * 1024
	return PerformanceConfig{
		// Caching defaults.
		EnableCache: true,
		CacheSize:   100 * mib,
		CacheExpiry: 5 * time.Minute,
		// Prefetch defaults.
		EnablePrefetch:    true,
		PrefetchThreshold: 3,
		// IO defaults.
		MaxConcurrentIO:  2 * runtime.NumCPU(),
		ReadAheadSize:    64 * 1024,
		WriteBufferSize:  4 * 1024,
		CompressionLevel: 1, // fastest useful level
		EnableDirectIO:   false,
		EnableAsyncIO:    true,
	}
}

// NewPerformanceOptimizer creates a new performance optimizer configured by
// config. The cache and prefetcher are constructed only when their Enable*
// flags are set; the IO scheduler is always present.
func NewPerformanceOptimizer(config PerformanceConfig) (*PerformanceOptimizer, error) {
	po := &PerformanceOptimizer{
		config:      config,
		ioScheduler: NewIOScheduler(config.MaxConcurrentIO),
	}

	if config.EnableCache {
		po.cache = NewFileCache(config.CacheSize, config.CacheExpiry)
	}
	if config.EnablePrefetch {
		po.prefetcher = NewPrefetcher(config.PrefetchThreshold)
	}

	return po, nil
}

// ReadFile reads a file with optimizations.
//
// Lookup order: cache (when enabled), then an optimized disk read. On a disk
// read the result is inserted into the cache and the prefetcher is notified.
// Read statistics (count, duration, bytes) are recorded for every call, and
// a cache_hit/cache_miss counter is bumped accordingly.
func (po *PerformanceOptimizer) ReadFile(ctx context.Context, path string) ([]byte, error) {
	startTime := time.Now()
	var bytesRead int64
	defer func() {
		// Report the actual byte count. The previous version always passed 0
		// here, so TotalReadBytes never grew and GetReadThroughput was
		// permanently 0.
		po.updateStats("read", time.Since(startTime), bytesRead)
	}()

	// Check cache first
	if po.cache != nil {
		if data, found := po.cache.Get(path); found {
			po.updateStats("cache_hit", 0, 1)
			bytesRead = int64(len(data))
			return data, nil
		}
	}

	po.updateStats("cache_miss", 0, 1)

	// Use optimized file reading
	data, err := po.readFileOptimized(ctx, path)
	if err != nil {
		return nil, err
	}
	bytesRead = int64(len(data))

	// Cache the result
	if po.cache != nil {
		po.cache.Set(path, data)
	}

	// Trigger prefetch for related files
	if po.prefetcher != nil {
		po.prefetcher.TriggerPrefetch(path)
	}

	return data, nil
}

// WriteFile writes a file with optimizations, then refreshes the cache entry
// for path so subsequent reads observe the new contents. Write statistics
// (count, duration, bytes) are recorded whether or not the write succeeds.
func (po *PerformanceOptimizer) WriteFile(ctx context.Context, path string, data []byte) error {
	start := time.Now()
	defer func() {
		po.updateStats("write", time.Since(start), int64(len(data)))
	}()

	if err := po.writeFileOptimized(ctx, path, data); err != nil {
		return err
	}

	// Keep the cache coherent with what is now on disk.
	if po.cache != nil {
		po.cache.Set(path, data)
	}
	return nil
}

// readFileOptimized reads a file with performance optimizations: it waits for
// an IO-scheduler slot (respecting ctx cancellation), opens the file with the
// configured flags, and reads it in ReadAheadSize chunks.
func (po *PerformanceOptimizer) readFileOptimized(ctx context.Context, path string) ([]byte, error) {
	// Schedule IO operation; blocks until a slot is free or ctx is cancelled.
	if err := po.ioScheduler.Schedule(ctx); err != nil {
		return nil, err
	}
	defer po.ioScheduler.Complete()

	// Open file with optimizations
	file, err := po.openFileOptimized(path, os.O_RDONLY, 0)
	if err != nil {
		return nil, fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()

	// Get file size
	stat, err := file.Stat()
	if err != nil {
		return nil, fmt.Errorf("failed to stat file: %w", err)
	}

	size := stat.Size()
	if size == 0 {
		return []byte{}, nil
	}

	// Guard against a zero/negative ReadAheadSize: the previous code would
	// loop forever issuing zero-length reads.
	readAhead := po.config.ReadAheadSize
	if readAhead <= 0 {
		readAhead = 64 * 1024
	}

	// Read in read-ahead-sized chunks until the whole file is consumed.
	buffer := make([]byte, size)
	totalRead := int64(0)
	for totalRead < size {
		chunk := readAhead
		if remaining := size - totalRead; remaining < chunk {
			chunk = remaining
		}

		n, err := file.Read(buffer[totalRead : totalRead+chunk])
		totalRead += int64(n)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("failed to read file: %w", err)
		}
		// A 0-byte read with a nil error would otherwise spin; treat as EOF.
		if n == 0 {
			break
		}
	}

	return buffer[:totalRead], nil
}

// writeFileOptimized writes data to path, creating parent directories as
// needed. The write goes through the IO scheduler, is chunked by
// WriteBufferSize, synced every 10 chunks to bound unflushed data, and synced
// once more at the end.
func (po *PerformanceOptimizer) writeFileOptimized(ctx context.Context, path string, data []byte) error {
	// Schedule IO operation; blocks until a slot is free or ctx is cancelled.
	if err := po.ioScheduler.Schedule(ctx); err != nil {
		return err
	}
	defer po.ioScheduler.Complete()

	// Create directory if needed
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}

	// Open file with optimizations
	file, err := po.openFileOptimized(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()

	// Guard against a zero/negative WriteBufferSize: with chunkSize == 0 the
	// previous loop never advanced (i += 0) and spun forever.
	chunkSize := po.config.WriteBufferSize
	if chunkSize <= 0 {
		chunkSize = 4096
	}

	// Write in chunks with buffering.
	for i := 0; i < len(data); i += chunkSize {
		end := i + chunkSize
		if end > len(data) {
			end = len(data)
		}

		if _, err := file.Write(data[i:end]); err != nil {
			return fmt.Errorf("failed to write file: %w", err)
		}

		// Sync every 10th chunk so a crash loses at most a bounded window.
		if (i/chunkSize)%10 == 0 {
			if err := file.Sync(); err != nil {
				return fmt.Errorf("failed to sync file: %w", err)
			}
		}
	}

	// Final sync
	return file.Sync()
}

// openFileOptimized opens a file with performance optimizations.
//
// When EnableDirectIO is set, O_DIRECT is ORed into the open flags to bypass
// the OS page cache.
// NOTE(review): syscall.O_DIRECT is not defined on all platforms (e.g.
// darwin, windows) — confirm build constraints restrict this file to
// platforms where it exists.
func (po *PerformanceOptimizer) openFileOptimized(path string, flag int, perm os.FileMode) (*os.File, error) {
	if po.config.EnableDirectIO {
		flag |= syscall.O_DIRECT
	}

	file, err := os.OpenFile(path, flag, perm)
	if err != nil {
		return nil, err
	}

	// Set performance optimizations
	// optimizeFileHandle is currently a no-op, but if it ever fails the file
	// must be closed here to avoid leaking the descriptor.
	if err := po.optimizeFileHandle(file); err != nil {
		file.Close()
		return nil, err
	}

	return file, nil
}

// optimizeFileHandle applies per-handle optimizations to an open file.
//
// Currently a deliberate no-op: socket-style options do not apply to regular
// file descriptors, and file-level tuning (advisory locking, read-ahead
// hints, etc.) is left for a real implementation. It always returns nil.
func (po *PerformanceOptimizer) optimizeFileHandle(file *os.File) error {
	return nil
}

// updateStats updates performance statistics under the optimizer's write
// lock. operation selects which counters move: "read"/"write" bump their
// count, cumulative time, and cumulative bytes; "cache_hit"/"cache_miss"
// bump only their counter (duration and bytes are ignored for those).
// Unknown operations are a no-op.
func (po *PerformanceOptimizer) updateStats(operation string, duration time.Duration, bytes int64) {
	po.mu.Lock()
	defer po.mu.Unlock()

	s := &po.stats
	switch operation {
	case "read":
		s.ReadCount++
		s.TotalReadTime += duration
		s.TotalReadBytes += bytes
	case "write":
		s.WriteCount++
		s.TotalWriteTime += duration
		s.TotalWriteBytes += bytes
	case "cache_hit":
		s.CacheHits++
	case "cache_miss":
		s.CacheMisses++
	}
}

// GetStats returns a snapshot copy of the current performance statistics.
func (po *PerformanceOptimizer) GetStats() PerformanceStats {
	po.mu.RLock()
	defer po.mu.RUnlock()
	return po.stats
}

// PerformanceStats represents performance statistics accumulated by a
// PerformanceOptimizer.
type PerformanceStats struct {
	ReadCount        int64         // number of read operations recorded
	WriteCount       int64         // number of write operations recorded
	CacheHits        int64         // cache lookups that returned data
	CacheMisses      int64         // cache lookups that fell through
	TotalReadTime    time.Duration // cumulative wall time spent reading
	TotalWriteTime   time.Duration // cumulative wall time spent writing
	TotalReadBytes   int64         // cumulative bytes read
	TotalWriteBytes  int64         // cumulative bytes written
}

// GetReadThroughput returns read throughput in bytes per second.
func (ps *PerformanceStats) GetReadThroughput() float64 {
	return throughput(ps.TotalReadBytes, ps.TotalReadTime)
}

// GetWriteThroughput returns write throughput in bytes per second.
func (ps *PerformanceStats) GetWriteThroughput() float64 {
	return throughput(ps.TotalWriteBytes, ps.TotalWriteTime)
}

// throughput converts a byte count and elapsed duration into bytes per
// second, returning 0 when no time has elapsed.
func throughput(bytes int64, elapsed time.Duration) float64 {
	if elapsed == 0 {
		return 0
	}
	return float64(bytes) / elapsed.Seconds()
}

// GetCacheHitRate returns the cache hit rate as a percentage (0 when no
// lookups have been recorded).
func (ps *PerformanceStats) GetCacheHitRate() float64 {
	lookups := ps.CacheHits + ps.CacheMisses
	if lookups == 0 {
		return 0
	}
	return 100 * float64(ps.CacheHits) / float64(lookups)
}

// FileCache provides a simple file cache
type FileCache struct {
	mu         sync.RWMutex
	cache      map[string]*CacheEntry
	maxSize    int64
	currentSize int64
	expiry     time.Duration
}

type CacheEntry struct {
	Data      []byte
	Size      int64
	Accessed  time.Time
}

// NewFileCache creates a new file cache
func NewFileCache(maxSize int64, expiry time.Duration) *FileCache {
	return &FileCache{
		cache:  make(map[string]*CacheEntry),
		maxSize: maxSize,
		expiry: expiry,
	}
}

// Get retrieves data from cache
func (fc *FileCache) Get(key string) ([]byte, bool) {
	fc.mu.RLock()
	entry, exists := fc.cache[key]
	fc.mu.RUnlock()

	if !exists {
		return nil, false
	}

	// Check expiry
	if time.Since(entry.Accessed) > fc.expiry {
		fc.mu.Lock()
		delete(fc.cache, key)
		fc.currentSize -= entry.Size
		fc.mu.Unlock()
		return nil, false
	}

	// Update access time
	fc.mu.Lock()
	entry.Accessed = time.Now()
	fc.mu.Unlock()

	return entry.Data, true
}

// Set stores data in cache
func (fc *FileCache) Set(key string, data []byte) {
	size := int64(len(data))

	fc.mu.Lock()
	defer fc.mu.Unlock()

	// Check if we need to evict entries
	for fc.currentSize+size > fc.maxSize && len(fc.cache) > 0 {
		// Simple eviction: remove oldest entry
		var oldestKey string
		var oldestTime time.Time

		for k, v := range fc.cache {
			if oldestKey == "" || v.Accessed.Before(oldestTime) {
				oldestKey = k
				oldestTime = v.Accessed
			}
		}

		if oldestKey != "" {
			entry := fc.cache[oldestKey]
			delete(fc.cache, oldestKey)
			fc.currentSize -= entry.Size
		}
	}

	// Add new entry
	fc.cache[key] = &CacheEntry{
		Data:     data,
		Size:     size,
		Accessed: time.Now(),
	}
	fc.currentSize += size
}

// Prefetcher tracks per-path access counts to decide when related files
// should be prefetched.
type Prefetcher struct {
	mu       sync.RWMutex
	accesses map[string]int // accesses per path since the last trigger
	threshold int           // access count at which a prefetch fires
}

// NewPrefetcher creates a new prefetcher that triggers after threshold
// accesses to the same path.
func NewPrefetcher(threshold int) *Prefetcher {
	p := &Prefetcher{threshold: threshold}
	p.accesses = make(map[string]int)
	return p
}

// TriggerPrefetch records an access to path. When the access count reaches
// the threshold the counter resets to zero; a real implementation would
// prefetch related files at that point.
func (p *Prefetcher) TriggerPrefetch(path string) {
	p.mu.Lock()
	defer p.mu.Unlock()

	count := p.accesses[path] + 1
	if count >= p.threshold {
		// Placeholder for the actual prefetch of related files.
		count = 0
	}
	p.accesses[path] = count
}

// IOScheduler schedules IO operations to limit concurrency
type IOScheduler struct {
	sem chan struct{}
}

// NewIOScheduler creates a new IO scheduler
func NewIOScheduler(maxConcurrent int) *IOScheduler {
	return &IOScheduler{
		sem: make(chan struct{}, maxConcurrent),
	}
}

// Schedule waits for an available slot
func (ios *IOScheduler) Schedule(ctx context.Context) error {
	select {
	case ios.sem <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Complete releases a slot
func (ios *IOScheduler) Complete() {
	<-ios.sem
}

// ResetStats resets all performance statistics to their zero values.
func (po *PerformanceOptimizer) ResetStats() {
	po.mu.Lock()
	po.stats = PerformanceStats{}
	po.mu.Unlock()
}