package crawler

import (
	"regexp"
	"sync"
	"time"

	"github.com/gocolly/colly"

	"strings"

	"io/ioutil"
	"net/http"
	"net/url"

	htmlmd "github.com/JohannesKaufmann/html-to-markdown"
	"github.com/temoto/robotstxt"
)

// Logger is the logging abstraction used by the crawler; callers may plug
// in their own implementation. Both methods take Printf-style format
// strings followed by their arguments (see the Info/Error calls in
// crawlCore).
type Logger interface {
	Info(format string, args ...interface{})
	Error(format string, args ...interface{})
}

// PageResult holds the outcome of fetching a single page.
type PageResult struct {
	URL       string    // final request URL of the page
	FetchedAt time.Time // time at which the response was processed
	HTML      string    // raw response body as a string
	Markdown  string    // markdown rendering of HTML (empty unless conversion is enabled)
	Title     string    // contents of the page's <title> tag, if found
}

// Option is a functional configuration hook applied to a Crawler by NewCrawler.
type Option func(*Crawler)

// Crawler is a recursive web crawler built on colly, configured through
// functional Options passed to NewCrawler.
type Crawler struct {
	maxCount          int               // maximum number of pages to process (0 = no limit)
	maxDepth          int               // intended recursion depth limit
	delay             time.Duration     // per-request delay fed into colly's LimitRule
	concurrency       int               // parallelism fed into colly's LimitRule
	sameDomainOnly    bool              // restrict crawling to the start URL's host
	headers           map[string]string // extra request headers to send
	retry             int               // intended retry count for failed requests
	logger            Logger            // optional logger; nil disables logging
	convertToMarkdown bool              // convert fetched HTML to markdown in results
	allowURLRevisit   bool              // whether revisiting the same URL is allowed
	async             bool              // enable colly's asynchronous mode

	userAgentPool   []string         // candidate User-Agent strings
	randomUserAgent bool             // pick a random UA from userAgentPool per request
	robotsGroup     *robotstxt.Group // robots.txt rules for the crawled domain
	robotsOnce      sync.Once        // ensures robots.txt is fetched only once per Crawler
}

// NewCrawler builds a Crawler, applying each Option in order on top of the
// defaults: an empty header map, markdown conversion disabled, and the
// built-in User-Agent pool.
func NewCrawler(opts ...Option) *Crawler {
	crawler := &Crawler{
		headers:           map[string]string{},
		convertToMarkdown: false,
		userAgentPool:     defaultUserAgentPool(),
	}
	for _, apply := range opts {
		apply(crawler)
	}
	return crawler
}

// PageHandler is the per-page callback used by CrawlWithHandler.
// Returning a non-nil error does not stop the crawl; crawlCore records
// handler errors and returns the first one after the crawl finishes.
type PageHandler func(result PageResult) error

// Crawl fetches startURL (and linked pages, per configuration) and returns
// every fetched page. Depth, delay, and concurrency handling are delegated
// to the colly-based core; pages gathered before an error are returned
// alongside it.
func (c *Crawler) Crawl(startURL string) ([]PageResult, error) {
	collected := make([]PageResult, 0)
	appendPage := func(page PageResult) error {
		collected = append(collected, page)
		return nil
	}
	err := c.crawlCore(startURL, appendPage)
	return collected, err
}

// CrawlWithHandler streams results: handler is invoked as soon as each page
// is fetched, instead of accumulating everything in memory as Crawl does.
func (c *Crawler) CrawlWithHandler(startURL string, handler PageHandler) error {
	return c.crawlCore(startURL, handler)
}

// crawlCore is the shared crawl engine behind Crawl and CrawlWithHandler.
// It builds a colly collector restricted to the start URL's host and path
// prefix, wires up logging, page limits, robots.txt checks, and link
// following, then visits startURL and blocks until the crawl completes.
// onPage is called once per fetched page; the first error any call returns
// is reported after the crawl finishes.
func (c *Crawler) crawlCore(startURL string, onPage func(PageResult) error) error {
	var mu sync.Mutex
	pageCount := 0
	var handlerErrs []error

	// Restrict crawling to URLs sharing the start URL's scheme/host/path prefix.
	u, err := url.Parse(startURL)
	if err != nil {
		return err
	}
	pathPrefix := u.Path
	if pathPrefix == "" {
		pathPrefix = "/"
	}
	pattern := "^" + regexp.QuoteMeta(u.Scheme+"://"+u.Host+pathPrefix)
	re := regexp.MustCompile(pattern)

	// Assemble collector options once instead of duplicating the constructor
	// call per configuration combination.
	collectorOpts := []colly.CollectorOption{
		colly.AllowedDomains(u.Host),
		colly.URLFilters(re),
	}
	if c.async {
		collectorOpts = append(collectorOpts, colly.Async(true))
	}
	if c.maxDepth > 0 {
		// Fix: c.maxDepth was previously accepted but never applied.
		collectorOpts = append(collectorOpts, colly.MaxDepth(c.maxDepth))
	}
	collector := colly.NewCollector(collectorOpts...)
	// Fix: c.allowURLRevisit was previously accepted but never applied.
	collector.AllowURLRevisit = c.allowURLRevisit

	if c.sameDomainOnly {
		collector.AllowedDomains = []string{u.Host}
	}

	if c.concurrency > 0 || c.delay > 0 {
		// Fix: the Limit error was silently discarded.
		if limitErr := collector.Limit(&colly.LimitRule{
			DomainGlob:  "*",
			Parallelism: c.concurrency,
			Delay:       c.delay,
		}); limitErr != nil && c.logger != nil {
			c.logger.Error("setting limit rule: %v", limitErr)
		}
	}

	// One request hook applies every configured header; the random
	// User-Agent (when enabled) overrides any configured one. The original
	// code registered one OnRequest per header, which both captured the loop
	// variables k/v (all hooks saw the last pair under pre-Go-1.22
	// semantics) and skipped every configured header when randomUserAgent
	// was on.
	collector.OnRequest(func(r *colly.Request) {
		for k, v := range c.headers {
			r.Headers.Set(k, v)
		}
		if c.randomUserAgent && len(c.userAgentPool) > 0 {
			r.Headers.Set("User-Agent", pickRandomUserAgent(c.userAgentPool))
		}
		if c.logger != nil {
			c.logger.Info("Visiting %s", r.URL.String())
		}
	})
	collector.OnError(func(r *colly.Response, err error) {
		// NOTE(review): c.retry is still unused; honoring it would need a
		// per-request attempt counter plus r.Request.Retry() here.
		if c.logger != nil {
			c.logger.Error("Error on %s: %v", r.Request.URL.String(), err)
		}
	})
	collector.OnResponse(func(r *colly.Response) {
		mu.Lock()
		defer mu.Unlock()
		// Enforce the page budget; responses beyond it are dropped.
		if c.maxCount > 0 && pageCount >= c.maxCount {
			return
		}
		pageCount++

		htmlStr := string(r.Body)
		md := ""
		if c.convertToMarkdown {
			converter := htmlmd.NewConverter("", true, nil)
			md, _ = converter.ConvertString(htmlStr)
		}
		result := PageResult{
			URL:       r.Request.URL.String(),
			FetchedAt: time.Now(),
			HTML:      htmlStr,
			Markdown:  md,
			Title:     extractTitle(htmlStr),
		}
		if onPage != nil {
			if cbErr := onPage(result); cbErr != nil {
				handlerErrs = append(handlerErrs, cbErr)
			}
		}
	})

	// Load robots.txt once per Crawler. NOTE(review): robotsOnce means a
	// reused Crawler keeps the first domain's rules — confirm Crawlers are
	// meant to be single-domain.
	c.robotsOnce.Do(func() {
		scheme := "https"
		if strings.HasPrefix(startURL, "http://") {
			scheme = "http"
		}
		robotsURL := scheme + "://" + u.Host + "/robots.txt"
		// Fix: the original http.Get had no timeout and leaked the response
		// body on non-200 statuses.
		client := &http.Client{Timeout: 10 * time.Second}
		resp, getErr := client.Get(robotsURL)
		if getErr != nil {
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return
		}
		body, _ := ioutil.ReadAll(resp.Body)
		if robots, parseErr := robotstxt.FromBytes(body); parseErr == nil {
			ua := "*"
			if len(c.userAgentPool) > 0 {
				ua = c.userAgentPool[0]
			}
			c.robotsGroup = robots.FindGroup(ua)
		}
	})

	collector.OnHTML("a[href]", func(e *colly.HTMLElement) {
		// Fix: pageCount is shared with OnResponse; read it under the mutex
		// to avoid a data race in async mode.
		mu.Lock()
		reachedLimit := c.maxCount > 0 && pageCount >= c.maxCount
		mu.Unlock()
		if reachedLimit {
			return
		}
		href := e.Request.AbsoluteURL(e.Attr("href"))
		if href == "" {
			return
		}
		if c.robotsGroup != nil {
			// Only follow links whose path the robots rules permit.
			link, parseErr := url.Parse(href)
			if parseErr != nil || !c.robotsGroup.Test(link.Path) {
				return
			}
		}
		e.Request.Visit(href)
	})

	err = collector.Visit(startURL)
	collector.Wait()
	if len(handlerErrs) > 0 {
		return handlerErrs[0] // surface the first handler error
	}
	return err
}

// getDomain extracts the host portion of a raw URL string: it strips an
// "http://" or "https://" prefix (only when something follows it) and
// returns everything up to the first '/' or ':' (so any port is dropped).
func getDomain(rawurl string) string {
	rest := rawurl
	for _, prefix := range []string{"http://", "https://"} {
		// Strict '>' preserves the original behavior for a bare prefix
		// with nothing after it.
		if len(rawurl) > len(prefix) && strings.HasPrefix(rawurl, prefix) {
			rest = rawurl[len(prefix):]
			break
		}
	}
	if cut := strings.IndexAny(rest, "/:"); cut >= 0 {
		return rest[:cut]
	}
	return rest
}

// pickRandomUserAgent selects one entry from pool, keyed off the current
// wall-clock nanoseconds (pseudo-random, not cryptographic). An empty pool
// yields the empty string.
func pickRandomUserAgent(pool []string) string {
	n := len(pool)
	if n == 0 {
		return ""
	}
	idx := time.Now().UnixNano() % int64(n)
	return pool[idx]
}

// defaultUserAgentPool returns the built-in User-Agent candidates used when
// the caller does not configure a pool.
func defaultUserAgentPool() []string {
	pool := []string{
		// Desktop browsers.
		"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
		"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15",
		"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
		// Mobile browsers.
		"Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Mobile/15E148 Safari/604.1",
		"Mozilla/5.0 (Linux; Android 13; SM-S918B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Mobile Safari/537.36",
	}
	return pool
}

// extractTitle returns the trimmed contents of the first <title> element in
// htmlStr, or "" when no complete title is present. Matching is
// case-insensitive and, unlike the previous version, tolerates attributes
// on the opening tag (e.g. <title lang="en">) and lowercases the document
// only once.
//
// NOTE(review): indices computed on the lowercased copy are applied to the
// original string; for ASCII markup this is exact, but exotic Unicode whose
// lowercase form has a different byte length could misalign the slice.
func extractTitle(htmlStr string) string {
	lower := strings.ToLower(htmlStr)
	start := strings.Index(lower, "<title")
	if start == -1 {
		return ""
	}
	// Find the end of the opening tag so attributes are skipped.
	tagEnd := strings.Index(lower[start:], ">")
	if tagEnd == -1 {
		return ""
	}
	contentStart := start + tagEnd + 1
	end := strings.Index(lower[contentStart:], "</title>")
	if end == -1 {
		return ""
	}
	return strings.TrimSpace(htmlStr[contentStart : contentStart+end])
}
