// Package network provides network optimization features for on-demand loading
package network

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/smart-snapshotter/internal/logging"
)

// ParallelDownloader implements parallel chunked downloads.
// Instances should be created with NewParallelDownloader, which fills in
// config defaults and sets up the shared HTTP client and buffer pool.
type ParallelDownloader struct {
	config     *ParallelConfig // tuning knobs; defaults applied by NewParallelDownloader
	client     *http.Client    // shared client; Timeout set from config.ConnectionTimeout
	logger     logging.Logger  // structured logger for progress and diagnostics
	chunkPool  sync.Pool       // pool of 32KB []byte copy buffers (used by downloadSingle)
}

// ParallelConfig contains parallel download configuration.
// Zero or negative fields are replaced with defaults by NewParallelDownloader.
type ParallelConfig struct {
	MaxConnections  int           // Maximum parallel connections per download (default 4)
	ChunkSize       int64         // Size of each chunk (default 1MB)
	MinChunkSize    int64         // Minimum chunk size (default 64KB)
	MaxChunkSize    int64         // Maximum chunk size (default 10MB)
	RetryAttempts   int           // Number of retry attempts per chunk (default 3)
	RetryDelay      time.Duration // Delay between retries, scaled linearly by attempt number (default 1s)
	ConnectionTimeout time.Duration // Connection timeout for the HTTP client (default 30s)
}

// ChunkDownloader handles the download of one byte range (chunk) of a file.
// Instances are built as literals inside ParallelDownloader.downloadChunks,
// sharing the parent downloader's client, logger, and config.
type ChunkDownloader struct {
	id         int               // index of this chunk within the download
	downloadTask *Download       // parent download this chunk belongs to
	chunk      *Chunk            // byte range, status, and progress for this chunk
	client     *http.Client      // HTTP client shared with the parent downloader
	logger     logging.Logger    // structured logger shared with the parent downloader
	config     *ParallelConfig   // retry/timeout settings shared with the parent downloader
	bandwidthLimiter *BandwidthLimiter // wraps the response body to throttle read rate
}

// NewParallelDownloader creates a new parallel downloader.
//
// Zero or negative fields in config are replaced with sensible defaults; the
// config is mutated in place so callers can observe the effective values.
// A nil config is treated as an empty one and receives all defaults
// (previously a nil config caused a panic on the first field access).
func NewParallelDownloader(config *ParallelConfig, logger logging.Logger) *ParallelDownloader {
	if config == nil {
		// Defensive: allow callers to pass nil and get a fully defaulted config.
		config = &ParallelConfig{}
	}
	if config.MaxConnections <= 0 {
		config.MaxConnections = 4
	}
	if config.ChunkSize <= 0 {
		config.ChunkSize = 1024 * 1024 // 1MB
	}
	if config.MinChunkSize <= 0 {
		config.MinChunkSize = 64 * 1024 // 64KB
	}
	if config.MaxChunkSize <= 0 {
		config.MaxChunkSize = 10 * 1024 * 1024 // 10MB
	}
	if config.RetryAttempts <= 0 {
		config.RetryAttempts = 3
	}
	if config.RetryDelay <= 0 {
		config.RetryDelay = time.Second
	}
	if config.ConnectionTimeout <= 0 {
		config.ConnectionTimeout = 30 * time.Second
	}

	return &ParallelDownloader{
		config: config,
		client: &http.Client{
			Timeout: config.ConnectionTimeout,
		},
		logger: logger,
		chunkPool: sync.Pool{
			New: func() interface{} {
				return make([]byte, 32*1024) // 32KB buffer
			},
		},
	}
}

// Download fetches download.URL into download.TempPath, splitting the
// transfer across multiple range-request connections when the server
// advertises byte-range support and the file is large enough; otherwise it
// falls back to a single streaming connection.
func (pd *ParallelDownloader) Download(ctx context.Context, download *Download, bandwidthLimiter *BandwidthLimiter) error {
	pd.logger.Info("Starting parallel download", StringField("downloadID", download.ID), StringField("url", download.URL))

	// Probe the server first for the file size and range support.
	size, rangeOK, err := pd.getFileInfo(ctx, download.URL)
	if err != nil {
		return fmt.Errorf("failed to get file info: %w", err)
	}
	download.Size = size
	download.ResumeSupport = rangeOK

	// Too small to split usefully, or no Range support: stream it whole.
	if !rangeOK || size < pd.config.MinChunkSize*2 {
		pd.logger.Info("File too small or no range support, using single connection",
			StringField("downloadID", download.ID), StringField("size", fmt.Sprintf("%d", size)), StringField("supportsRange", fmt.Sprintf("%t", rangeOK)))
		return pd.downloadSingle(ctx, download, bandwidthLimiter)
	}

	// Decide how many chunks of what size to request.
	chunkSize, chunkCount := pd.calculateOptimalChunks(size)
	pd.logger.Info("Parallel download configuration",
		StringField("downloadID", download.ID), StringField("chunks", fmt.Sprintf("%d", chunkCount)), StringField("chunkSize", fmt.Sprintf("%d", chunkSize)))

	download.Chunks = pd.createChunks(download, chunkSize, chunkCount)
	return pd.downloadChunks(ctx, download, download.Chunks, bandwidthLimiter)
}

// getFileInfo issues a HEAD request to url and reports the file's size in
// bytes and whether the server advertises byte-range support
// ("Accept-Ranges: bytes").
//
// When the server does not report a usable Content-Length (resp.ContentLength
// is negative for "unknown"), range support is reported as false: chunked
// downloads cannot be planned without a known total size, so callers fall
// back to the single-connection path instead of recording a bogus size with
// ResumeSupport set.
func (pd *ParallelDownloader) getFileInfo(ctx context.Context, url string) (int64, bool, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
	if err != nil {
		return 0, false, fmt.Errorf("failed to create HEAD request: %w", err)
	}

	resp, err := pd.client.Do(req)
	if err != nil {
		return 0, false, fmt.Errorf("failed to execute HEAD request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return 0, false, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	contentLength := resp.ContentLength
	supportsRange := resp.Header.Get("Accept-Ranges") == "bytes"

	// Unknown size: chunk planning is impossible, so disable the parallel path.
	if contentLength < 0 {
		supportsRange = false
	}

	return contentLength, supportsRange, nil
}

// calculateOptimalChunks calculates the chunk size and chunk count for a file
// of fileSize bytes.
//
// The size starts from config.ChunkSize, is shrunk so that MaxConnections
// chunks cover the file when possible, and is always clamped to
// [MinChunkSize, MaxChunkSize]. MaxChunkSize is honored even when that means
// more than MaxConnections chunks: actual concurrency is bounded separately
// by the semaphore in downloadChunks, so a larger chunk count is safe.
// (Previously the recomputed size after capping the count could silently
// exceed MaxChunkSize, e.g. a 100MB file with 4 connections produced 25MB
// chunks against a 10MB cap.)
func (pd *ParallelDownloader) calculateOptimalChunks(fileSize int64) (int64, int) {
	// Defensive: callers guard against this, but never return a zero count.
	if fileSize <= 0 {
		return pd.config.MinChunkSize, 1
	}

	// Start with the configured chunk size.
	chunkSize := pd.config.ChunkSize

	// Shrink so the file splits across all connections when possible.
	if fileSize/int64(pd.config.MaxConnections) < chunkSize {
		chunkSize = fileSize / int64(pd.config.MaxConnections)
	}

	// Clamp to the configured bounds.
	if chunkSize < pd.config.MinChunkSize {
		chunkSize = pd.config.MinChunkSize
	}
	if chunkSize > pd.config.MaxChunkSize {
		chunkSize = pd.config.MaxChunkSize
	}

	// Ceiling division: count of chunks needed to cover the file.
	chunkCount := int((fileSize + chunkSize - 1) / chunkSize)

	// Prefer one chunk per connection, but never let that inflate chunks
	// past MaxChunkSize.
	if chunkCount > pd.config.MaxConnections {
		chunkCount = pd.config.MaxConnections
		chunkSize = (fileSize + int64(chunkCount) - 1) / int64(chunkCount)
		if chunkSize > pd.config.MaxChunkSize {
			chunkSize = pd.config.MaxChunkSize
			chunkCount = int((fileSize + chunkSize - 1) / chunkSize)
		}
	}

	return chunkSize, chunkCount
}

// createChunks partitions the byte range [0, download.Size) into chunkCount
// contiguous chunks. Every chunk spans chunkSize bytes except the last,
// which absorbs the remainder up to the end of the file. Start/End bounds
// are inclusive, matching the HTTP Range header convention.
func (pd *ParallelDownloader) createChunks(download *Download, chunkSize int64, chunkCount int) []*Chunk {
	chunks := make([]*Chunk, 0, chunkCount)

	for id := 0; id < chunkCount; id++ {
		first := int64(id) * chunkSize
		last := first + chunkSize - 1

		// The final chunk runs to the last byte of the file.
		if id == chunkCount-1 {
			last = download.Size - 1
		}

		chunks = append(chunks, &Chunk{
			ID:     id,
			Start:  first,
			End:    last,
			Status: "pending",
		})
	}

	return chunks
}

// downloadChunks downloads all chunks in parallel and merges them into the
// final file at download.TempPath.
//
// Each chunk gets its own temp file, which is handed to its ChunkDownloader
// so the downloaded bytes survive until the merge step. (Previously the
// chunk downloader wrote to a private temp file that it deleted on return,
// so the temp files created here stayed empty and the merged output
// contained no data.)
//
// Concurrency is bounded by config.MaxConnections via a semaphore, so it is
// safe for len(chunks) to exceed MaxConnections.
func (pd *ParallelDownloader) downloadChunks(ctx context.Context, download *Download, chunks []*Chunk, bandwidthLimiter *BandwidthLimiter) error {
	// One temp file per chunk; cleanup closes and removes whatever exists.
	tempFiles := make([]*os.File, len(chunks))
	cleanup := func() {
		for _, f := range tempFiles {
			if f != nil {
				f.Close()
				os.Remove(f.Name())
			}
		}
	}

	for i, chunk := range chunks {
		tempFile, err := os.CreateTemp("", fmt.Sprintf("chunk_%d_", chunk.ID))
		if err != nil {
			cleanup()
			return fmt.Errorf("failed to create temp file for chunk %d: %w", chunk.ID, err)
		}
		tempFiles[i] = tempFile
	}

	var wg sync.WaitGroup
	errChan := make(chan error, len(chunks))
	semaphore := make(chan struct{}, pd.config.MaxConnections)

	for i, chunk := range chunks {
		wg.Add(1)
		semaphore <- struct{}{} // Acquire a connection slot before spawning.

		go func(chunkID int, chunk *Chunk, out *os.File) {
			defer wg.Done()
			defer func() { <-semaphore }() // Release the slot.

			chunkDownloader := &ChunkDownloader{
				id:               chunkID,
				downloadTask:     download,
				chunk:            chunk,
				client:           pd.client,
				logger:           pd.logger,
				config:           pd.config,
				bandwidthLimiter: bandwidthLimiter,
			}

			if err := chunkDownloader.download(ctx, out); err != nil {
				errChan <- fmt.Errorf("chunk %d download failed: %w", chunkID, err)
			}
		}(i, chunk, tempFiles[i])
	}

	// Wait for all chunks to finish (success or failure).
	wg.Wait()
	close(errChan)

	// Report the first chunk error, if any. A receive on the closed, empty
	// channel yields nil, which means every chunk succeeded.
	if err := <-errChan; err != nil {
		cleanup()
		return err
	}

	// mergeChunks takes ownership of the temp files and removes them.
	return pd.mergeChunks(download, chunks, tempFiles)
}

// downloadSingle performs a single-connection streaming download, used as the
// fallback for small files and for servers without byte-range support.
// Progress is published to download.Downloaded (atomically, for concurrent
// readers) and download.Size is set to the total number of bytes received.
func (pd *ParallelDownloader) downloadSingle(ctx context.Context, download *Download, bandwidthLimiter *BandwidthLimiter) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, download.URL, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}

	resp, err := pd.client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to execute request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	// Create the output file directly at the download's temp path.
	file, err := os.Create(download.TempPath)
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}
	defer file.Close()

	// Wrap the body so reads are throttled to the bandwidth limit.
	reader := bandwidthLimiter.Reader(resp.Body)

	// Reuse a pooled 32KB copy buffer instead of allocating per download.
	buffer := pd.chunkPool.Get().([]byte)
	defer pd.chunkPool.Put(buffer)

	totalBytes := int64(0)
	for {
		// Honor cancellation between reads.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		n, err := reader.Read(buffer)
		if n > 0 {
			if _, writeErr := file.Write(buffer[:n]); writeErr != nil {
				return fmt.Errorf("failed to write to file: %w", writeErr)
			}
			totalBytes += int64(n)
			atomic.StoreInt64(&download.Downloaded, totalBytes)
		}

		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read response: %w", err)
		}
	}

	download.Size = totalBytes
	return nil
}

// mergeChunks concatenates the per-chunk temp files, in chunk order, into the
// final file at download.TempPath. It closes and removes every temp file
// whether or not the merge succeeds (the original leaked the remaining files
// when a chunk failed to copy).
func (pd *ParallelDownloader) mergeChunks(download *Download, chunks []*Chunk, tempFiles []*os.File) error {
	defer func() {
		for _, f := range tempFiles {
			if f != nil {
				f.Close()
				os.Remove(f.Name())
			}
		}
	}()

	finalFile, err := os.Create(download.TempPath)
	if err != nil {
		return fmt.Errorf("failed to create final file: %w", err)
	}
	defer finalFile.Close()

	for i, chunk := range chunks {
		if chunk.Status != "completed" {
			return fmt.Errorf("chunk %d not completed", i)
		}

		// Rewind: the chunk downloader leaves the file positioned at EOF.
		if _, err := tempFiles[i].Seek(0, io.SeekStart); err != nil {
			return fmt.Errorf("failed to seek temp file %d: %w", i, err)
		}

		if _, err := io.Copy(finalFile, tempFiles[i]); err != nil {
			return fmt.Errorf("failed to copy chunk %d: %w", i, err)
		}
	}

	return nil
}

// download downloads this chunk into out, retrying up to config.RetryAttempts
// additional times with a linearly increasing, cancellation-aware backoff.
// On success the chunk status becomes "completed"; after the final failed
// attempt it becomes "failed" and the last error is recorded on the chunk.
func (cd *ChunkDownloader) download(ctx context.Context, out *os.File) error {
	cd.chunk.Status = "downloading"
	cd.logger.Info("Starting chunk download",
		StringField("downloadID", cd.downloadTask.ID), StringField("chunkID", fmt.Sprintf("%d", cd.chunk.ID)),
		StringField("start", fmt.Sprintf("%d", cd.chunk.Start)), StringField("end", fmt.Sprintf("%d", cd.chunk.End)))

	var lastErr error
	for attempt := 0; attempt <= cd.config.RetryAttempts; attempt++ {
		if attempt > 0 {
			cd.logger.Info("Retrying chunk download",
				StringField("downloadID", cd.downloadTask.ID), StringField("chunkID", fmt.Sprintf("%d", cd.chunk.ID)), StringField("attempt", fmt.Sprintf("%d", attempt)))
			// Linear backoff that also honors context cancellation
			// (a plain time.Sleep would ignore ctx).
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(cd.config.RetryDelay * time.Duration(attempt)):
			}
		}

		lastErr = cd.downloadChunk(ctx, out)
		if lastErr == nil {
			cd.chunk.Status = "completed"
			cd.logger.Info("Chunk download completed",
				StringField("downloadID", cd.downloadTask.ID), StringField("chunkID", fmt.Sprintf("%d", cd.chunk.ID)))
			return nil
		}

		cd.logger.Warn("Chunk download attempt failed",
			StringField("downloadID", cd.downloadTask.ID), StringField("chunkID", fmt.Sprintf("%d", cd.chunk.ID)), StringField("error", lastErr.Error()))
	}

	cd.chunk.Status = "failed"
	cd.chunk.Error = lastErr
	return fmt.Errorf("chunk download failed after %d attempts: %w", cd.config.RetryAttempts+1, lastErr)
}

// downloadChunk performs one attempt at fetching this chunk's byte range via
// an HTTP Range request, writing the bytes into out. The file is truncated
// and rewound first so a retry after a partial write starts clean. On success
// the chunk's Downloaded counter and the parent download's atomic Downloaded
// total are updated; on failure nothing is added to the totals.
func (cd *ChunkDownloader) downloadChunk(ctx context.Context, out *os.File) error {
	// Reset the output file in case a previous attempt left partial data.
	if err := out.Truncate(0); err != nil {
		return fmt.Errorf("failed to reset temp file: %w", err)
	}
	if _, err := out.Seek(0, io.SeekStart); err != nil {
		return fmt.Errorf("failed to rewind temp file: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, cd.downloadTask.URL, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}

	// Request exactly this chunk's byte range (bounds are inclusive).
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", cd.chunk.Start, cd.chunk.End))

	resp, err := cd.client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to execute request: %w", err)
	}
	defer resp.Body.Close()

	// A server that ignores Range answers 200 with the whole file, which
	// would corrupt the merge — insist on 206 Partial Content.
	if resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("unexpected status code for range request: %d", resp.StatusCode)
	}

	// Throttle reads to the shared bandwidth limit.
	reader := cd.bandwidthLimiter.Reader(resp.Body)

	buffer := make([]byte, 32*1024) // 32KB copy buffer
	downloaded := int64(0)

	for {
		// Honor cancellation between reads.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		n, err := reader.Read(buffer)
		if n > 0 {
			if _, writeErr := out.Write(buffer[:n]); writeErr != nil {
				return fmt.Errorf("failed to write to temp file: %w", writeErr)
			}
			downloaded += int64(n)
		}

		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read response: %w", err)
		}
	}

	// Verify we received exactly the requested range.
	expectedSize := cd.chunk.End - cd.chunk.Start + 1
	if downloaded != expectedSize {
		return fmt.Errorf("chunk size mismatch: expected %d, got %d", expectedSize, downloaded)
	}

	// Publish progress only after the full chunk arrived, so a failed
	// attempt never inflates the download total.
	cd.chunk.Downloaded = downloaded
	atomic.AddInt64(&cd.downloadTask.Downloaded, downloaded)

	return nil
}

// GetOptimalChunkSize calculates an optimal chunk size based on network
// conditions.
//
// The base chunk size targets roughly five seconds of transfer time at
// networkSpeed (bytes per second). For files smaller than twice that, half
// the file size is returned so at least two chunks are possible. Otherwise
// the size is raised, if necessary, so the file splits into at least
// minChunks chunks, and the result is rounded up to the next 64KB multiple.
//
// minChunks values below 1 are treated as 1 (the original divided by
// minChunks unguarded, panicking on 0), and the rounding is a true ceiling
// (the original "+1" formula added a spurious extra 64KB when the size was
// already an exact multiple).
func GetOptimalChunkSize(fileSize int64, networkSpeed int64, minChunks int) int64 {
	if minChunks < 1 {
		minChunks = 1
	}

	// Aim for about 5 seconds of download time per chunk.
	baseChunkSize := networkSpeed * 5

	// Small file: just split it in half.
	if fileSize < baseChunkSize*2 {
		return fileSize / 2
	}

	// Ensure the file yields at least minChunks chunks.
	if minChunkSize := fileSize / int64(minChunks); baseChunkSize < minChunkSize {
		baseChunkSize = minChunkSize
	}

	// Round up to the next 64KB boundary; exact multiples are unchanged.
	const granularity = 64 * 1024
	return (baseChunkSize + granularity - 1) / granularity * granularity
}