package downloader

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"time"
)

// Progress is a point-in-time snapshot of download state delivered to
// the caller-supplied progress callback.
type Progress struct {
	Total      int64  // total bytes expected, or -1 when the size is unknown
	Completed  int64  // bytes written to disk so far
	SpeedBps   int64  // approximate bytes/second over the last sample window
	Source     string // URL currently (or most recently) being read from
	Peers      int    // NOTE(review): never assigned anywhere in this file — confirm intended use before relying on it
}

// progressTick is the internal unit of progress reporting for the
// segmented download path: a worker sends one tick per chunk written,
// and the aggregator goroutine folds ticks into the running totals.
type progressTick struct {
	n   int64  // bytes just written
	src string // source URL the bytes came from
}

// Options configures a Downloader created by New.
type Options struct {
	ProxyURL        string // optional HTTP proxy URL; an unparsable value is silently ignored
	SpeedLimitBytes int64  // approximate speed cap in bytes/second for single-stream downloads; <=0 disables
	Chunks          int    // number of concurrent range segments; <=1 forces single-stream
	MaxRetries      int    // per-source retry count for a failed segment; <=0 disables retries
	RetryBackoffSec int    // base retry backoff in seconds, doubled per attempt; <=0 defaults to 2s
}

// Downloader performs HTTP(S) downloads with optional resume,
// multi-source failover, and segmented (Range-based) transfers.
type Downloader struct {
	client *http.Client // shared client; no global timeout so long downloads aren't cut off (cancel via ctx)
	opt    Options
}

// New constructs a Downloader configured from opt.
//
// If opt.ProxyURL parses as a URL it is installed on the transport; an
// unparsable proxy URL is ignored (best effort). The client carries no
// overall timeout on purpose — downloads may legitimately run for a
// long time, and callers cancel through the context given to Download.
func New(opt Options) *Downloader {
	t := &http.Transport{}
	if opt.ProxyURL != "" {
		u, err := url.Parse(opt.ProxyURL)
		if err == nil {
			t.Proxy = http.ProxyURL(u)
		}
	}
	d := &Downloader{opt: opt}
	d.client = &http.Client{Transport: t, Timeout: 0}
	return d
}

// Download fetches primary (plus optional mirror sources) into savePath.
//
// It probes the first reachable source with a HEAD request to learn the
// total size and range support, then chooses a strategy:
//   - an existing partial file resumes via a single ranged stream;
//   - unknown size, no range support, or Chunks <= 1 uses a single stream;
//   - otherwise the file is downloaded in concurrent segments.
//
// progress (may be nil) receives periodic Progress updates; cancellation
// is honored through ctx.
func (d *Downloader) Download(ctx context.Context, primary string, sources []string, savePath string, progress func(Progress)) error {
	// Ensure the destination directory exists.
	if err := os.MkdirAll(filepath.Dir(savePath), 0o755); err != nil {
		return err
	}

	existing := int64(0)
	if fi, err := os.Stat(savePath); err == nil {
		existing = fi.Size()
	}

	// Build the candidate list, primary first, skipping empty entries.
	candidates := make([]string, 0, 1+len(sources))
	if primary != "" {
		candidates = append(candidates, primary)
	}
	for _, s := range sources {
		if s != "" {
			candidates = append(candidates, s)
		}
	}
	if len(candidates) == 0 {
		return errors.New("no source url")
	}

	// Probe HEAD on the first source that answers.
	var (
		total       int64 = -1
		acceptRange bool
		headErr     error
		headETag    string
		headLM      string
	)
	for _, u := range candidates {
		if t, ar, et, lm, e := d.headOn(u); e == nil {
			total, acceptRange, headErr = t, ar, nil
			headETag, headLM = et, lm
			break
		} else {
			headErr = e
		}
	}
	if headErr != nil {
		// Every HEAD failed: proceed blind with a plain GET.
		total, acceptRange = -1, false
	}

	if total >= 0 && existing > total {
		// Stale partial is larger than the remote file. Truncate before
		// restarting: merely resetting `existing` (as before) left the
		// old tail in place past the end of the new content.
		if err := os.Truncate(savePath, 0); err != nil {
			return err
		}
		existing = 0
	}

	// Already fully downloaded: report completion and stop. Re-requesting
	// "bytes=total-" would only earn a 416 from the server.
	if total > 0 && existing == total {
		if progress != nil {
			progress(Progress{Total: total, Completed: existing, Source: candidates[0]})
		}
		return nil
	}

	// A partial file resumes over a single stream for simplicity.
	if existing > 0 {
		return d.downloadSingleMulti(ctx, candidates, savePath, total, existing, acceptRange, progress)
	}

	// Unknown length, no range support, or segmentation disabled.
	if total <= 0 || !acceptRange || d.opt.Chunks <= 1 {
		return d.downloadSingleMulti(ctx, candidates, savePath, total, 0, acceptRange, progress)
	}

	return d.downloadSegmented(ctx, candidates, savePath, total, d.opt.Chunks, headETag, headLM, progress)
}

// downloadSingleMulti runs a single-stream download, trying each
// candidate URL in order until one succeeds. Resume parameters
// (existing/acceptRange) are forwarded to every attempt. Context
// cancellation aborts immediately instead of moving to the next source.
func (d *Downloader) downloadSingleMulti(ctx context.Context, candidates []string, savePath string, total, existing int64, acceptRange bool, progress func(Progress)) error {
	var lastErr error
	for _, src := range candidates {
		err := d.downloadSingleOnce(ctx, src, savePath, total, existing, acceptRange, progress)
		if err == nil {
			return nil
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		lastErr = err
	}
	if lastErr != nil {
		return lastErr
	}
	return errors.New("no valid sources for single download")
}

// downloadSingleOnce streams url into savePath over one connection,
// resuming at byte offset existing when the server supports ranges.
//
// Corruption fixes over the previous version: when resume is impossible
// — the server does not accept ranges, or it answers a Range request
// with 200 instead of 206 (i.e. it ignored the header, per RFC 7233) —
// the local file is truncated and the download restarts from zero.
// Previously the full body was appended/spliced at the old offset.
func (d *Downloader) downloadSingleOnce(ctx context.Context, url, savePath string, total, existing int64, acceptRange bool, progress func(Progress)) error {
	f, err := os.OpenFile(savePath, os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()

	// Resume is only meaningful when the server honors Range requests.
	if existing > 0 && !acceptRange {
		if err := f.Truncate(0); err != nil {
			return err
		}
		existing = 0
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	wantRange := acceptRange && existing > 0
	if wantRange {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", existing))
	}
	resp, err := d.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("http %d", resp.StatusCode)
	}
	if wantRange && resp.StatusCode != http.StatusPartialContent {
		// Server ignored the Range header and is sending the whole body:
		// restart from the beginning rather than splicing it mid-file.
		if err := f.Truncate(0); err != nil {
			return err
		}
		existing = 0
	}
	if existing > 0 {
		if _, err := f.Seek(existing, io.SeekStart); err != nil {
			return err
		}
	}
	if total < 0 {
		// Derive the total from Content-Length when HEAD didn't supply it.
		if cl := resp.Header.Get("Content-Length"); cl != "" {
			if n, e := strconv.ParseInt(cl, 10, 64); e == nil {
				total = n + existing
			}
		}
	}

	last := time.Now()
	lastBytes := existing
	buf := make([]byte, 32*1024)
	for {
		// Approximate speed cap: sleep briefly whenever the rate measured
		// since the last progress window exceeds the configured limit.
		if d.opt.SpeedLimitBytes > 0 {
			if elapsed := time.Since(last); elapsed > 0 {
				curSpeed := (existing - lastBytes) * int64(time.Second) / int64(elapsed)
				if curSpeed > d.opt.SpeedLimitBytes {
					time.Sleep(20 * time.Millisecond)
				}
			}
		}
		n, readErr := resp.Body.Read(buf)
		if n > 0 {
			wn, werr := f.Write(buf[:n])
			if werr != nil {
				return werr
			}
			if wn != n {
				return errors.New("short write")
			}
			existing += int64(n)
		}
		// Emit a progress sample at most twice per second.
		if elapsed := time.Since(last); elapsed >= 500*time.Millisecond {
			speed := (existing - lastBytes) * int64(time.Second) / int64(elapsed)
			if progress != nil {
				progress(Progress{Total: total, Completed: existing, SpeedBps: speed, Source: url})
			}
			last = time.Now()
			lastBytes = existing
		}
		if readErr != nil {
			if readErr == io.EOF {
				// Final sample so callers observe the completed byte count.
				if progress != nil {
					progress(Progress{Total: total, Completed: existing, Source: url})
				}
				return nil
			}
			return readErr
		}
	}
}

// downloadSegmented downloads total bytes from candidates into savePath
// using up to chunks concurrent HTTP Range workers writing via WriteAt.
//
// A sidecar "<savePath>.meta.json" records which parts finished so an
// interrupted download can resume; it is validated against total and,
// when available, the server's ETag/Last-Modified before reuse, and
// removed once every part completes.
//
// Fixes over the previous version: the sidecar metadata is guarded by a
// mutex (workers updated it concurrently — a data race), chunks is
// clamped so total < chunks can no longer produce empty/invalid ranges,
// resumed downloads start the progress count from already-done parts,
// and cancellation stops a worker instead of letting it retry/fail over.
func (d *Downloader) downloadSegmented(ctx context.Context, candidates []string, savePath string, total int64, chunks int, etag string, lastMod string, progress func(Progress)) error {
	// Never create more parts than there are bytes: with total < chunks
	// the per-part size would be zero and the partition would emit
	// invalid empty ranges (e.g. "bytes=0--1").
	if int64(chunks) > total {
		chunks = int(total)
	}
	if chunks < 1 {
		chunks = 1
	}

	f, err := os.OpenFile(savePath, os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	// Pre-size the file so each worker can WriteAt into its own region.
	if err := f.Truncate(total); err != nil {
		return err
	}

	// Partition [0, total) into contiguous inclusive byte ranges.
	partSize := total / int64(chunks)
	type part struct{ start, end int64 }
	parts := make([]part, 0, chunks)
	var start int64
	for i := 0; i < chunks; i++ {
		end := start + partSize - 1
		if i == chunks-1 {
			end = total - 1 // last part absorbs the division remainder
		}
		parts = append(parts, part{start: start, end: end})
		start = end + 1
	}

	// Sidecar metadata tracking per-part completion for resume.
	type partMeta struct {
		Start int64 `json:"start"`
		End   int64 `json:"end"`
		Done  bool  `json:"done"`
	}
	type metaFile struct {
		Total        int64      `json:"total"`
		ETag         string     `json:"etag"`
		LastModified string     `json:"last_modified"`
		Parts        []partMeta `json:"parts"`
	}
	metaPath := savePath + ".meta.json"

	// metaMu guards meta and the sidecar file: multiple workers mark
	// parts done and persist concurrently.
	var metaMu sync.Mutex
	loadMeta := func() (*metaFile, error) {
		b, e := os.ReadFile(metaPath)
		if e != nil {
			return nil, e
		}
		var m metaFile
		if er := json.Unmarshal(b, &m); er != nil {
			return nil, er
		}
		// Reject sidecars written for a different size or partitioning.
		if m.Total != total || len(m.Parts) != len(parts) {
			return nil, errors.New("meta mismatch")
		}
		return &m, nil
	}
	// saveMetaLocked persists meta; callers hold metaMu (or are the only
	// goroutine alive, as during setup). Persistence is best-effort.
	saveMetaLocked := func(m *metaFile) {
		b, _ := json.MarshalIndent(m, "", "  ")
		_ = os.WriteFile(metaPath, b, 0o644)
	}
	freshMeta := func() *metaFile {
		pm := make([]partMeta, len(parts))
		for i, p := range parts {
			pm[i] = partMeta{Start: p.start, End: p.end}
		}
		return &metaFile{Total: total, ETag: etag, LastModified: lastMod, Parts: pm}
	}

	// Load an existing sidecar, or start fresh.
	var meta *metaFile
	if mm, e := loadMeta(); e == nil {
		if (etag != "" && mm.ETag != "" && etag != mm.ETag) ||
			(lastMod != "" && mm.LastModified != "" && lastMod != mm.LastModified) {
			// Remote content changed since the sidecar was written: reset.
			_ = f.Truncate(0)
			if err := f.Truncate(total); err != nil {
				return err
			}
			meta = freshMeta()
		} else {
			meta = mm
			// Backfill validators the old sidecar was missing.
			if meta.ETag == "" {
				meta.ETag = etag
			}
			if meta.LastModified == "" {
				meta.LastModified = lastMod
			}
		}
	} else {
		meta = freshMeta()
	}
	saveMetaLocked(meta)

	// Start the byte counter from parts completed in an earlier run so
	// resumed progress reporting is accurate.
	var completed int64
	for _, pm := range meta.Parts {
		if pm.Done {
			completed += pm.End - pm.Start + 1
		}
	}

	progressCh := make(chan progressTick, 1024)
	errCh := make(chan error, chunks)
	var wg sync.WaitGroup

	// Progress aggregator: folds worker ticks into the running total and
	// invokes the callback on a fixed 500ms cadence. Only this goroutine
	// touches `completed` after this point.
	done := make(chan struct{})
	go func() {
		last := time.Now()
		lastBytes := completed
		lastSource := ""
		ticker := time.NewTicker(500 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case tk := <-progressCh:
				completed += tk.n
				if tk.src != "" {
					lastSource = tk.src
				}
			case <-ticker.C:
				elapsed := time.Since(last)
				speed := (completed - lastBytes) * int64(time.Second) / int64(elapsed)
				if progress != nil {
					progress(Progress{Total: total, Completed: completed, SpeedBps: speed, Source: lastSource})
				}
				last = time.Now()
				lastBytes = completed
			case <-done:
				// Final sample so callers observe the finished state.
				if progress != nil {
					progress(Progress{Total: total, Completed: completed, Source: lastSource})
				}
				return
			case <-ctx.Done():
				return
			}
		}
	}()

	// Queue only the parts not already completed.
	jobs := make(chan part, len(parts))
	for i, p := range parts {
		if !meta.Parts[i].Done {
			jobs <- p
		}
	}
	close(jobs)

	worker := func() {
		defer wg.Done()
		for p := range jobs {
			srcIdx, attempts := 0, 0
			var err error
			for srcIdx < len(candidates) {
				err = d.downloadRange(ctx, candidates[srcIdx], p.start, p.end, f, progressCh)
				if err == nil {
					break
				}
				// Cancellation is terminal: no retries, no failover.
				if ctx.Err() != nil {
					errCh <- ctx.Err()
					return
				}
				if d.opt.MaxRetries > 0 && attempts < d.opt.MaxRetries {
					backoff := time.Duration(d.opt.RetryBackoffSec) * time.Second
					if backoff <= 0 {
						backoff = 2 * time.Second
					}
					// Exponential backoff: base doubled per attempt.
					timer := time.NewTimer(backoff << attempts)
					select {
					case <-ctx.Done():
						timer.Stop()
						errCh <- ctx.Err()
						return
					case <-timer.C:
						attempts++
						continue
					}
				}
				// Retries exhausted on this source: fail over to the next.
				srcIdx++
				attempts = 0
			}
			if err != nil {
				errCh <- err
				return
			}
			// Mark the finished part and persist, under the lock.
			metaMu.Lock()
			for i := range meta.Parts {
				if meta.Parts[i].Start == p.start && meta.Parts[i].End == p.end {
					meta.Parts[i].Done = true
					break
				}
			}
			saveMetaLocked(meta)
			metaMu.Unlock()
		}
	}
	for i := 0; i < chunks; i++ {
		wg.Add(1)
		go worker()
	}
	go func() { wg.Wait(); close(done) }()

	// Collect at most one error per worker; stop as soon as all workers
	// have exited. progressCh is deliberately never closed — the
	// aggregator exits via done/ctx, and closing it while the aggregator
	// still selects on it would make that case spin on zero values.
	var firstErr error
collect:
	for i := 0; i < chunks; i++ {
		select {
		case e := <-errCh:
			if firstErr == nil {
				firstErr = e
			}
		case <-done:
			break collect
		}
	}

	// Remove the sidecar once every part has completed.
	metaMu.Lock()
	allDone := true
	for _, pm := range meta.Parts {
		if !pm.Done {
			allDone = false
			break
		}
	}
	metaMu.Unlock()
	if allDone {
		_ = os.Remove(metaPath)
	}
	return firstErr
}

// downloadRange fetches bytes [start, end] of url and writes them into
// f at the matching offsets via WriteAt (safe for concurrent workers on
// disjoint ranges). Each written chunk is reported on progressCh.
//
// Fixes over the previous version: a 200 response (server ignored the
// Range header, per RFC 7233 only 206 confirms a partial response) is
// rejected instead of writing the whole body at offset start, a server
// overrunning the requested range is detected, and the progress send is
// cancellation-aware so a worker cannot block forever on a full channel
// after the aggregator has exited.
func (d *Downloader) downloadRange(ctx context.Context, url string, start, end int64, f *os.File, progressCh chan<- progressTick) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
	resp, err := d.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("http %d", resp.StatusCode)
	}
	if resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("server ignored range request (http %d)", resp.StatusCode)
	}
	buf := make([]byte, 32*1024)
	offset := start
	for {
		n, readErr := resp.Body.Read(buf)
		if n > 0 {
			// Refuse to write past the end of this part's region.
			if offset+int64(n) > end+1 {
				return errors.New("server sent more bytes than requested")
			}
			wn, werr := f.WriteAt(buf[:n], offset)
			if werr != nil {
				return werr
			}
			if wn != n {
				return errors.New("short write")
			}
			offset += int64(n)
			if progressCh != nil {
				select {
				case progressCh <- progressTick{n: int64(n), src: url}:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
		}
		if readErr != nil {
			if readErr == io.EOF {
				return nil
			}
			return readErr
		}
	}
}

// headOn issues a HEAD request against url to discover the payload size,
// range support (Accept-Ranges), and validators (ETag / Last-Modified).
// total is -1 when Content-Length is absent or unparsable.
//
// Fix: error statuses (4xx/5xx) now return an error instead of being
// treated as a successful probe, so Download's failover loop moves on
// to the next mirror.
func (d *Downloader) headOn(url string) (total int64, acceptRange bool, etag string, lastMod string, err error) {
	req, err := http.NewRequest(http.MethodHead, url, nil)
	if err != nil {
		return -1, false, "", "", err
	}
	resp, err := d.client.Do(req)
	if err != nil {
		return -1, false, "", "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return -1, false, "", "", fmt.Errorf("http %d", resp.StatusCode)
	}
	total = -1
	if cl := resp.Header.Get("Content-Length"); cl != "" {
		if n, e := strconv.ParseInt(cl, 10, 64); e == nil {
			total = n
		}
	}
	if ar := resp.Header.Get("Accept-Ranges"); ar != "" && ar != "none" {
		acceptRange = true
	}
	etag = resp.Header.Get("ETag")
	lastMod = resp.Header.Get("Last-Modified")
	return total, acceptRange, etag, lastMod, nil
}
