package main

import (
	"bufio"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"
)

// downloadWorker tracks the lifecycle of one in-flight download task:
// ctx/cancel stop the transfer (pause/abort) and wg lets callers wait for
// the worker goroutine to finish its cleanup.
type downloadWorker struct {
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

// limiter is a simple token-bucket rate limiter shared by all segments of
// one task. capacity is the refill rate in bytes per second and also the
// maximum bucket size; the balance may go negative ("debt") so that a
// single request larger than the bucket still completes.
type limiter struct {
	mu         sync.Mutex
	capacity   int64 // tokens (bytes) added per second; also the bucket cap
	tokens     int64 // current balance; negative means sleep-off debt
	lastRefill time.Time
}

// newLimiter returns a limiter refilling at bps bytes/second, or nil when
// bps is non-positive (a nil limiter disables throttling entirely).
func newLimiter(bps int64) *limiter {
	if bps <= 0 {
		return nil
	}
	return &limiter{capacity: bps, tokens: bps, lastRefill: time.Now()}
}

// consume blocks until n tokens have been accounted for. A nil receiver or
// non-positive n is a no-op.
//
// The tokens are taken immediately and the balance is allowed to go
// negative; consume then sleeps exactly long enough for the refill rate to
// pay off the debt. This fixes two defects of the wait-until-available
// approach: a request with n > capacity can never be satisfied from the
// capped bucket (infinite loop — any speed limit below the 128 KiB chunk
// size hung forever), and the millisecond-truncated sleep busy-spun for
// sub-millisecond waits.
func (l *limiter) consume(n int64) {
	if l == nil || n <= 0 {
		return
	}
	l.mu.Lock()
	now := time.Now()
	// Refill tokens earned since the last refill, capped at capacity.
	if elapsed := now.Sub(l.lastRefill).Seconds(); elapsed > 0 {
		if add := int64(elapsed * float64(l.capacity)); add > 0 {
			l.tokens += add
			if l.tokens > l.capacity {
				l.tokens = l.capacity
			}
			l.lastRefill = now
		}
	}
	l.tokens -= n
	debt := -l.tokens
	capacity := l.capacity
	l.mu.Unlock()
	if debt > 0 {
		// Sleep until the refill rate would have covered the debt.
		time.Sleep(time.Duration(float64(debt) / float64(capacity) * float64(time.Second)))
	}
}

// defaultDownloadDir returns the preferred save directory: ~/Downloads when
// it exists (the common name on both Windows and Linux), otherwise the home
// directory, falling back to the current directory when the home directory
// cannot be determined.
func defaultDownloadDir() string {
	home, err := os.UserHomeDir()
	if err != nil {
		return "."
	}
	downloads := filepath.Join(home, "Downloads")
	if info, statErr := os.Stat(downloads); statErr == nil && info.IsDir() {
		return downloads
	}
	return home
}

// startDownload launches the worker goroutine for task id. It resolves the
// save path (de-duplicating existing file names), registers a per-task
// worker, probes the server for total size and Range support, and then
// streams the file either as a single stream or as concurrent byte-range
// segments. The method returns immediately; progress, speed and final
// status are reported asynchronously via emitUpdate.
func (m *Manager) startDownload(id string) error {
	m.mu.Lock()
	t, ok := m.tasks[id]
	if !ok {
		m.mu.Unlock()
		return errors.New("task not found")
	}
	t.AddLog("info", "开始下载")
	if t.SavePath == "" {
		// Use the configured default directory; fall back to the system
		// Downloads folder when none is set.
		dir := m.defaultSavePath
		if dir == "" {
			dir = defaultDownloadDir()
		}
		// Make sure the target directory exists.
		_ = os.MkdirAll(dir, 0755)
		// Avoid clobbering an existing file: append "(n)" until free.
		base := t.Name
		save := filepath.Join(dir, base)
		if st, err := os.Stat(save); err == nil && st.Mode().IsRegular() {
			ext := filepath.Ext(base)
			nameOnly := strings.TrimSuffix(base, ext)
			for i := 1; ; i++ {
				cand := fmt.Sprintf("%s(%d)%s", nameOnly, i, ext)
				try := filepath.Join(dir, cand)
				if _, err := os.Stat(try); os.IsNotExist(err) {
					save = try
					break
				}
			}
		}
		t.SavePath = save
	} else {
		// An explicit save path was given; still ensure its directory exists.
		_ = os.MkdirAll(filepath.Dir(t.SavePath), 0755)
	}
	// Never start a second worker for a task that is already running.
	if m.workers == nil {
		m.workers = make(map[string]*downloadWorker)
	}
	if _, exists := m.workers[id]; exists {
		m.mu.Unlock()
		return nil
	}
	t.Status = StatusDownloading
	m.mu.Unlock()
	m.emitUpdate()

	ctx, cancel := context.WithCancel(context.Background())
	w := &downloadWorker{ctx: ctx, cancel: cancel}
	w.wg.Add(1)
	m.mu.Lock()
	m.workers[id] = w
	// Per-task speed limiter, shared across all segments.
	var lim *limiter
	if m.speedLimitB > 0 {
		lim = newLimiter(m.speedLimitB)
	}
	m.mu.Unlock()

	go func() {
		defer w.wg.Done()
		// Release the context on every exit path so the stats goroutine
		// and any in-flight requests are torn down; previously cancel was
		// never called here, leaking the ticker goroutine whenever an
		// error path returned before close(done).
		defer cancel()
		defer func() {
			m.mu.Lock()
			delete(m.workers, id)
			m.mu.Unlock()
			// Uniform teardown: return the concurrency slot and schedule
			// the next batch.
			m.onWorkerDone()
		}()

		// Resume point: bytes already on disk.
		// NOTE(review): after a failed *segmented* download the file may
		// be sparse, so its size is not a safe resume offset — confirm
		// that segmented retries restart from zero.
		var startAt int64 = 0
		if fi, err := os.Stat(t.SavePath); err == nil {
			startAt = fi.Size()
		}

		// HTTP client with optional proxy, cookie jar, and a redirect
		// policy that preserves key request headers.
		tr := &http.Transport{
			Proxy:           http.ProxyFromEnvironment, // default: proxy from environment
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
			// Bound time-to-first-byte only. Do NOT use http.Client.Timeout
			// here: it covers the entire body read and would abort any
			// download that takes longer than the timeout.
			ResponseHeaderTimeout: 30 * time.Second,
		}
		if m.proxy != "" {
			if pu, err := url.Parse(m.proxy); err == nil {
				tr.Proxy = http.ProxyURL(pu)
			}
		}
		jar, _ := cookiejar.New(nil)
		client := &http.Client{
			Transport: tr,
			Jar:       jar,
		}
		// On redirect, carry Range/UA/etc. over and point Referer at the
		// previous request's host.
		client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			if len(via) == 0 {
				return nil
			}
			prev := via[len(via)-1]
			// Copy headers the new request does not already set.
			for k, vs := range prev.Header {
				if _, ok := req.Header[k]; !ok {
					for _, v := range vs {
						req.Header.Add(k, v)
					}
				}
			}
			// Referer follows the previous hop's host.
			if u, err := url.Parse(prev.URL.String()); err == nil {
				req.Header.Set("Referer", u.Scheme+"://"+u.Host+"/")
			}
			return nil
		}

		// Probe total size and Range support, preferring HEAD.
		total, rangeOK := int64(0), false
		{
			headReq, _ := http.NewRequestWithContext(ctx, http.MethodHead, t.URL, nil)
			if resp, err := client.Do(headReq); err == nil {
				if cl := resp.Header.Get("Content-Length"); cl != "" {
					if v, err := strconv.ParseInt(cl, 10, 64); err == nil {
						total = v
					}
				}
				if ar := resp.Header.Get("Accept-Ranges"); strings.Contains(strings.ToLower(ar), "bytes") {
					rangeOK = true
				}
				resp.Body.Close()
			}
		}
		// HEAD was inconclusive: fall back to a GET probe.
		if total == 0 {
			req0, _ := http.NewRequestWithContext(ctx, http.MethodGet, t.URL, nil)
			resp0, err := client.Do(req0)
			if err != nil {
				if ctx.Err() != nil {
					return
				}
				m.failTask(id, fmt.Errorf("http get probe: %w", err))
				return
			}
			if cl := resp0.Header.Get("Content-Length"); cl != "" {
				if v, err := strconv.ParseInt(cl, 10, 64); err == nil {
					total = v
				}
			}
			// Accept-Ranges may show up here as well.
			if ar := resp0.Header.Get("Accept-Ranges"); strings.Contains(strings.ToLower(ar), "bytes") {
				rangeOK = true
			}
			// Close the probe body so the real request below can proceed.
			resp0.Body.Close()
		}
		// Verify Range support for real: a bytes=0-0 probe must come back
		// as 206 with a Content-Range header. Some servers answer 200,
		// which we treat as "no reliable Range support".
		{
			testReq, _ := http.NewRequestWithContext(ctx, http.MethodGet, t.URL, nil)
			testReq.Header.Set("Range", "bytes=0-0")
			if testResp, err := client.Do(testReq); err == nil {
				if testResp.StatusCode == http.StatusPartialContent && testResp.Header.Get("Content-Range") != "" {
					rangeOK = true
				} else {
					rangeOK = false
				}
				// Drain the single probe byte so the connection is reusable.
				_, _ = io.Copy(io.Discard, testResp.Body)
				testResp.Body.Close()
			}
		}

		// Initialize total size and resume offset on the task.
		m.mu.Lock()
		if total > 0 {
			t.Size = total
		}
		t.Downloaded = startAt
		m.mu.Unlock()
		m.emitUpdate()

		// Open (or create) the destination file.
		f, err := os.OpenFile(t.SavePath, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			m.failTask(id, fmt.Errorf("open file: %w", err))
			return
		}
		defer f.Close()
		// Fresh download (no resume point): truncate so stale bytes from
		// an old file cannot survive past the new end of file.
		if startAt == 0 {
			_ = f.Truncate(0)
		}

		// Throughput accounting.
		lastTick := time.Now()
		var bytesSinceLast int64 = 0
		var bmu sync.Mutex
		updateTicker := time.NewTicker(500 * time.Millisecond)
		defer updateTicker.Stop()

		// Stats goroutine: publishes speed/ETA twice a second.
		done := make(chan struct{})
		go func() {
			for {
				select {
				case <-ctx.Done():
					m.mu.Lock()
					// Only downgrade to Paused while the task is still
					// downloading — never clobber a Failed status (the
					// worker cancels the context on failure exits too).
					if t.Status == StatusDownloading {
						t.Status = StatusPaused
					}
					t.Speed = 0
					m.mu.Unlock()
					m.emitUpdate()
					return
				case <-updateTicker.C:
					now := time.Now()
					elapsed := now.Sub(lastTick).Seconds()
					if elapsed > 0 {
						bmu.Lock()
						speed := int64(float64(bytesSinceLast) / elapsed)
						bytesSinceLast = 0
						bmu.Unlock()
						m.mu.Lock()
						t.Speed = speed
						if t.Size > 0 {
							remain := t.Size - t.Downloaded
							if speed > 0 {
								t.ETA = int64(float64(remain) / float64(speed))
							} else {
								t.ETA = 0
							}
						}
						m.mu.Unlock()
						m.emitUpdate()
						lastTick = now
					}
				case <-done:
					return
				}
			}
		}()

		// singleStream downloads one HTTP stream and writes it to f at
		// writeOffset, used both for whole-file/resume transfers and for
		// individual segments.
		// strictRange: when true, a 200 answer to a Range request is an
		// error (segment mode must not overwrite the whole file); when
		// false (single-stream resume) we fall back to a full re-download.
		singleStream := func(rangeHeader string, writeOffset int64, strictRange bool) error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, t.URL, nil)
			if err != nil {
				return err
			}
			// Browser-like headers; identity encoding keeps the byte
			// stream and lengths consistent for resuming.
			req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36")
			req.Header.Set("Accept", "*/*")
			req.Header.Set("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8")
			// Force the raw byte stream; server-side compression would
			// break resume offsets and size accounting.
			req.Header.Set("Accept-Encoding", "identity")
			req.Header.Set("Connection", "keep-alive")

			// Referer points at the download host.
			if u, err := url.Parse(t.URL); err == nil {
				req.Header.Set("Referer", u.Scheme+"://"+u.Host+"/")
			}

			// The outer client carries proxy, cookie jar and redirect policy.
			if rangeHeader != "" {
				req.Header.Set("Range", rangeHeader)
			}
			resp, err := client.Do(req)
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
				return fmt.Errorf("unexpected status: %s", resp.Status)
			}
			// Update total size from the response (Content-Range first,
			// then Content-Length) when it is still unknown.
			if resp.StatusCode == http.StatusPartialContent {
				if cr := resp.Header.Get("Content-Range"); cr != "" {
					// Shape: bytes START-END/TOTAL
					if p := strings.LastIndex(cr, "/"); p > 0 {
						if tot, err := strconv.ParseInt(strings.TrimSpace(cr[p+1:]), 10, 64); err == nil && tot > 0 {
							m.mu.Lock()
							if t.Size == 0 {
								t.Size = tot
							}
							m.mu.Unlock()
						}
					}
				}
			} else if resp.StatusCode == http.StatusOK {
				if cl := resp.Header.Get("Content-Length"); cl != "" {
					if v, err := strconv.ParseInt(cl, 10, 64); err == nil && v > 0 {
						m.mu.Lock()
						if t.Size == 0 {
							t.Size = v
						}
						m.mu.Unlock()
					}
				}
			}
			// A first response may carry the file name in Content-Disposition.
			if t.Name == "" {
				if cd := resp.Header.Get("Content-Disposition"); cd != "" {
					// Naive parse of filename="..." or filename=...
					if i := strings.Index(strings.ToLower(cd), "filename="); i >= 0 {
						fn := cd[i+9:]
						fn = strings.Trim(fn, "\"'")
						if fn != "" {
							m.mu.Lock()
							t.Name = fn
							m.mu.Unlock()
						}
					}
				}
			}
			// We asked for a Range but the server answered 200.
			if rangeHeader != "" && resp.StatusCode == http.StatusOK {
				if strictRange {
					// Segment mode strictly requires 206; a full body
					// here would corrupt the file.
					return fmt.Errorf("server ignored range request")
				}
				// Non-strict (single-stream resume): restart from zero.
				if err := f.Truncate(0); err == nil {
					writeOffset = 0
					m.mu.Lock()
					t.Downloaded = 0
					m.mu.Unlock()
				}
			}
			reader := bufio.NewReaderSize(resp.Body, 256*1024)
			pos := writeOffset
			// Reuse one chunk buffer; WriteAt copies the data out, so it
			// is safe to overwrite on the next iteration.
			chunk := make([]byte, 128*1024)
			for {
				n, er := reader.Read(chunk)
				if n > 0 {
					if lim != nil {
						lim.consume(int64(n))
					}
					if _, ew := f.WriteAt(chunk[:n], pos); ew != nil {
						return fmt.Errorf("write file: %w", ew)
					}
					pos += int64(n)
					bmu.Lock()
					bytesSinceLast += int64(n)
					bmu.Unlock()
					m.mu.Lock()
					t.Downloaded += int64(n)
					m.mu.Unlock()
				}
				if er == io.EOF {
					return nil
				}
				if er != nil {
					return er
				}
				if ctx.Err() != nil {
					return ctx.Err()
				}
			}
		}

		// Segmented transfer only when Range is supported, we start from
		// zero, the total size is known, and more than one segment is
		// configured.
		segs := m.maxSegments
		if segs < 1 {
			segs = 1
		}
		useSegments := rangeOK && startAt == 0 && t.Size > 0 && segs > 1
		if !useSegments {
			// Single stream; resume from the byte offset when present.
			var rh string
			if startAt > 0 {
				rh = fmt.Sprintf("bytes=%d-", startAt)
			}
			if err := singleStream(rh, startAt, false); err != nil {
				if ctx.Err() != nil {
					return
				}
				m.failTask(id, fmt.Errorf("download: %w", err))
				return
			}
		} else {
			// Split the file into segs ranges; the last one absorbs the
			// remainder.
			partSize := t.Size / int64(segs)
			var (
				wg     sync.WaitGroup
				segMu  sync.Mutex
				segErr error
			)
			for i := 0; i < segs; i++ {
				start := int64(i) * partSize
				end := start + partSize - 1
				if i == segs-1 {
					end = t.Size - 1
				}
				wg.Add(1)
				go func(s, e int64) {
					defer wg.Done()
					rh := fmt.Sprintf("bytes=%d-%d", s, e)
					if err := singleStream(rh, s, true); err != nil && ctx.Err() == nil {
						// Record only the first failure; calling failTask
						// once per segment would bump the retry counter
						// several times for a single attempt.
						segMu.Lock()
						if segErr == nil {
							segErr = fmt.Errorf("segment %d-%d: %w", s, e, err)
						}
						segMu.Unlock()
					}
				}(start, end)
			}
			wg.Wait()
			if ctx.Err() != nil {
				return
			}
			if segErr != nil {
				m.failTask(id, segErr)
				return
			}
		}

		close(done)
		// Final state transition.
		m.mu.Lock()
		if ctx.Err() != nil {
			// Cancelled/paused; keep a Failed status intact.
			if t.Status != StatusFailed {
				t.Status = StatusPaused
				t.Speed = 0
			}
		} else {
			switch {
			case t.Size > 0 && t.Downloaded >= t.Size:
				t.Status = StatusCompleted
				t.Speed = 0
				t.ETA = 0
			case t.Size > 0:
				// Short of the declared size: treat as a failure so
				// failTask can drive the retry logic.
				m.mu.Unlock()
				m.failTask(id, fmt.Errorf("incomplete: %d/%d", t.Downloaded, t.Size))
				return
			default:
				// Unknown total size and the stream ended cleanly: the
				// download is complete (previously the task was left
				// stuck in the Downloading state).
				t.Status = StatusCompleted
				t.Speed = 0
				t.ETA = 0
			}
		}
		// Snapshot the final status under the lock (the previous code
		// read t.Status unlocked after emitUpdate).
		finished := t.Status == StatusCompleted
		m.mu.Unlock()
		m.emitUpdate()
		if finished {
			t.AddLog("info", "下载完成")
		}
	}()
	return nil
}

// failTask marks task id as failed, logs the error, and — while the retry
// budget is not exhausted — re-queues the task as Waiting after a linear
// backoff of retry seconds.
func (m *Manager) failTask(id string, err error) {
	m.mu.Lock()
	t, ok := m.tasks[id]
	if !ok {
		m.mu.Unlock()
		m.emitUpdate()
		fmt.Println("task failed (unknown):", err)
		return
	}
	// The original set StatusFailed twice; once is enough.
	t.Status = StatusFailed
	t.AddLog("error", fmt.Sprintf("任务失败: %v", err))
	t.Speed = 0
	t.ETA = 0
	t.RetryCount++
	retry := t.RetryCount
	maxRetries := m.retryMax // renamed: "max" shadows the Go 1.21 builtin
	m.mu.Unlock()
	m.emitUpdate()
	fmt.Println("task failed:", err)
	// Light backoff retry: while retry <= maxRetries, put the task back
	// into the waiting queue after the delay.
	if retry <= maxRetries {
		go func(delay time.Duration) {
			time.Sleep(delay)
			m.mu.Lock()
			// Only re-queue if the task is still in this exact failed
			// attempt (status and retry count unchanged meanwhile).
			if tt, ok2 := m.tasks[id]; ok2 && tt.Status == StatusFailed && tt.RetryCount == retry {
				tt.Status = StatusWaiting
			}
			m.mu.Unlock()
			m.emitUpdate()
			m.schedule()
		}(time.Duration(retry) * time.Second)
	}
}
