package crawler

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"regexp"
	"strings"
	"time"

	"github.com/PuerkitoBio/goquery"
)

// Article holds the data scraped from a single article page.
type Article struct {
	Title       string    `json:"title"`
	URL         string    `json:"url"`
	Content     string    `json:"content"`
	Answer      string    `json:"answer"`
	ImageData   []string  `json:"image_data"`
	PublishDate string    `json:"publish_date"`
	ReadCount   string    `json:"read_count"`
	Category    string    `json:"category"`
	CrawlTime   time.Time `json:"crawl_time"`
	SplitMarker string    `json:"split_marker"` // records which marker was used to split Content/Answer
}

// CategoryData stores all articles crawled for one category in a single run.
type CategoryData struct {
	Category  string    `json:"category"`
	BaseURL   string    `json:"base_url"`
	Articles  []Article `json:"articles"`
	CrawlTime time.Time `json:"crawl_time"`
}

// Crawler scrapes the article listing and article pages of one category.
type Crawler struct {
	BaseURL       string
	Category      string
	Client        *http.Client
	MaxArticles   int        // maximum number of articles to crawl; 0 means unlimited
	NoSplitRecord []*Article // articles whose body could not be split into Content/Answer
}

// NewCrawler returns a Crawler for the given site base URL and category.
// maxArticles caps how many articles are crawled; 0 means no limit.
func NewCrawler(baseURL, category string, maxArticles int) *Crawler {
	// Generous timeout: some article pages respond slowly.
	client := &http.Client{Timeout: 30 * time.Second}

	return &Crawler{
		BaseURL:       baseURL,
		Category:      category,
		Client:        client,
		MaxArticles:   maxArticles,
		NoSplitRecord: []*Article{},
	}
}

// Crawl scrapes every article in the crawler's category and returns the
// collected data. Articles whose body could not be split into a
// question/answer pair are additionally recorded in c.NoSplitRecord.
func (c *Crawler) Crawl() (*CategoryData, error) {
	fmt.Printf("开始爬取 %s 分类的数据...\n", c.Category)

	// Result container for everything crawled in this run.
	result := &CategoryData{
		Category:  c.Category,
		BaseURL:   c.BaseURL,
		Articles:  []Article{},
		CrawlTime: time.Now(),
	}

	// Locate the listing URL for this category.
	categoryURL, err := c.findCategoryURL()
	if err != nil {
		return nil, fmt.Errorf("查找分类URL失败: %v", err)
	}

	fmt.Printf("找到分类 %s 的URL: %s\n", c.Category, categoryURL)

	// Collect article links across the category's listing pages.
	var allLinks []string

	// First listing page.
	links, nextPageURL, err := c.getArticleLinksFromPage(categoryURL)
	if err != nil {
		return nil, fmt.Errorf("获取文章链接失败: %v", err)
	}

	allLinks = append(allLinks, links...)

	// Follow pagination while a next page exists and more links are
	// still wanted (MaxArticles == 0 means unlimited).
	for nextPageURL != "" && (c.MaxArticles == 0 || len(allLinks) < c.MaxArticles) {
		fmt.Printf("爬取下一页: %s\n", nextPageURL)

		// Throttle between listing-page requests.
		time.Sleep(500 * time.Millisecond)

		links, nextURL, err := c.getArticleLinksFromPage(nextPageURL)
		if err != nil {
			fmt.Printf("爬取页面 %s 失败: %v\n", nextPageURL, err)
			break
		}

		allLinks = append(allLinks, links...)
		nextPageURL = nextURL

		// Hard cap on collected links to guard against circular
		// pagination. NOTE(review): the original comment said "at most
		// 10 pages", but the code actually caps at 200 links.
		if len(allLinks) > 200 {
			break
		}
	}

	// Trim to the configured maximum, if any.
	if c.MaxArticles > 0 && len(allLinks) > c.MaxArticles {
		fmt.Printf("限制爬取文章数量为 %d\n", c.MaxArticles)
		allLinks = allLinks[:c.MaxArticles]
	}

	fmt.Printf("共找到 %d 个文章链接\n", len(allLinks))

	// Fetch each article page in turn; failures are logged and skipped.
	for _, link := range allLinks {
		fmt.Printf("爬取文章: %s\n", link)

		article, err := c.getArticleData(link)
		if err != nil {
			fmt.Printf("爬取文章 %s 失败: %v\n", link, err)
			continue
		}

		// Track articles whose body could not be split (their
		// SplitMarker contains "无分割", i.e. "no split").
		if strings.Contains(article.SplitMarker, "无分割") {
			c.NoSplitRecord = append(c.NoSplitRecord, article)
		}

		// Append to the result set.
		result.Articles = append(result.Articles, *article)

		// Throttle between article requests.
		time.Sleep(500 * time.Millisecond)
	}

	fmt.Printf("完成爬取 %s 分类，共获取 %d 篇文章\n", c.Category, len(result.Articles))
	fmt.Printf("其中未能分割的文章: %d 篇\n", len(c.NoSplitRecord))

	return result, nil
}

// findCategoryURL resolves the listing URL for c.Category by scanning the
// site's home page, trying several selectors in order of specificity and
// finally falling back to a hard-coded slug mapping.
// NOTE(review): the HTTP status code is not checked here — a non-200
// response body would still be parsed; confirm whether that is intended.
func (c *Crawler) findCategoryURL() (string, error) {
	// Fetch the home page.
	resp, err := c.Client.Get(c.BaseURL)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Parse the HTML.
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return "", err
	}

	fmt.Printf("查找分类: %s\n", c.Category)

	// 1) Look for the category under h3 headings inside .maintm1 blocks.
	var categoryURL string
	var allCategories []string

	doc.Find(".maintm1 h3").Each(func(i int, h3 *goquery.Selection) {
		categoryName := strings.TrimSpace(h3.Text())
		allCategories = append(allCategories, categoryName)

		// Heading matches the requested category.
		if strings.Contains(categoryName, c.Category) {
			// Take the first link within the heading's parent block.
			h3.Parent().Find("a").Each(func(j int, a *goquery.Selection) {
				if categoryURL == "" { // only the first link is used
					if href, exists := a.Attr("href"); exists && href != "" {
						// Make the URL absolute.
						if !strings.HasPrefix(href, "http") {
							if strings.HasPrefix(href, "/") {
								categoryURL = c.BaseURL + href
							} else {
								categoryURL = c.BaseURL + "/" + href
							}
						} else {
							categoryURL = href
						}
					}
				}
			})
		}
	})

	// 2) Fall back to navigation/header links.
	if categoryURL == "" {
		doc.Find("nav a, .nav a, .nav_ul a").Each(func(i int, s *goquery.Selection) {
			text := strings.TrimSpace(s.Text())
			if strings.Contains(text, c.Category) {
				href, exists := s.Attr("href")
				if exists && href != "" {
					// Make the URL absolute.
					if !strings.HasPrefix(href, "http") {
						if strings.HasPrefix(href, "/") {
							categoryURL = c.BaseURL + href
						} else {
							categoryURL = c.BaseURL + "/" + href
						}
					} else {
						categoryURL = href
					}
				}
			}
		})
	}

	// 3) Fall back to scanning every anchor on the page.
	// NOTE(review): there is no early exit here, so if several anchors
	// match, the LAST one wins — unlike step 1 which takes the first.
	if categoryURL == "" {
		doc.Find("a").Each(func(i int, s *goquery.Selection) {
			text := strings.TrimSpace(s.Text())
			href, exists := s.Attr("href")
			if exists && href != "" && strings.Contains(text, c.Category) {
				// Make the URL absolute.
				if !strings.HasPrefix(href, "http") {
					if strings.HasPrefix(href, "/") {
						categoryURL = c.BaseURL + href
					} else {
						categoryURL = c.BaseURL + "/" + href
					}
				} else {
					categoryURL = href
				}
			}
		})
	}

	// 4) Last resort: a predefined category-to-slug mapping.
	if categoryURL == "" {
		// Known category names mapped to their URL slugs.
		mapping := map[string]string{
			"塔罗爱情": "zhanbuaiqing",
			"塔罗婚姻": "zhanbuhunyun",
			"塔罗事业": "zhanbushiye",
			"塔罗学业": "zhanbuxueye",
			"塔罗运势": "zhanbuyunshi",
			"塔罗健康": "zhanbujiankang",
			"塔罗测试": "tarot",
			"星座运势": "xingzuo",
		}

		if slug, ok := mapping[c.Category]; ok {
			categoryURL = fmt.Sprintf("%s/%s/", c.BaseURL, slug)
		} else {
			// Retry after stripping the "塔罗" (tarot) or "星座"
			// (zodiac) prefix from the category name.
			category := strings.TrimPrefix(c.Category, "塔罗")
			category = strings.TrimPrefix(category, "星座")

			subMapping := map[string]string{
				"爱情":   "zhanbuaiqing",
				"婚姻":   "zhanbuhunyun",
				"事业":   "zhanbushiye",
				"学业":   "zhanbuxueye",
				"运势":   "zhanbuyunshi",
				"健康":   "zhanbujiankang",
				"测试":   "tarot",
				"星座运势": "xingzuo",
			}

			if slug, ok := subMapping[category]; ok {
				categoryURL = fmt.Sprintf("%s/%s/", c.BaseURL, slug)
			} else if slug, ok := subMapping[c.Category]; ok {
				categoryURL = fmt.Sprintf("%s/%s/", c.BaseURL, slug)
			} else {
				// Guess a conventional /category/<name>/ path.
				categoryURL = fmt.Sprintf("%s/category/%s/", c.BaseURL, category)
			}
		}
	}

	return categoryURL, nil
}

// getArticleLinksFromPage fetches a single listing page and returns the
// article links found on it (deduplicated, absolute URLs) plus the URL of
// the next pagination page, or "" if none was found.
func (c *Crawler) getArticleLinksFromPage(pageURL string) ([]string, string, error) {
	var links []string
	var nextPageURL string

	// Fetch the listing page.
	resp, err := c.Client.Get(pageURL)
	if err != nil {
		return nil, "", err
	}
	defer resp.Body.Close()

	// Parse the HTML.
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil, "", err
	}

	// 1) Collect article links from the standard list containers.
	newsListFound := false
	doc.Find(".newslist, .list, ul.list").Each(func(i int, list *goquery.Selection) {
		newsListFound = true

		list.Find("a").Each(func(j int, a *goquery.Selection) {
			href, exists := a.Attr("href")
			if !exists || href == "" {
				return
			}

			// Skip javascript links, in-page anchors, and single chars.
			if !strings.Contains(href, "javascript:") && !strings.Contains(href, "#") && len(href) > 1 {
				// Make the URL absolute.
				if !strings.HasPrefix(href, "http") {
					if strings.HasPrefix(href, "/") {
						href = c.BaseURL + href
					} else {
						href = c.BaseURL + "/" + href
					}
				}

				// Keep only links that look like article pages.
				if isArticleURL(href) {
					links = append(links, href)
				}
			}
		})
	})

	// 2) If the standard containers produced nothing, try other common
	// article-list containers.
	if !newsListFound || len(links) == 0 {
		articleContainers := []string{
			".article-list", ".article-items", ".post-list", ".post-items",
			".content-list", ".list-box", ".article-box", ".blog-list",
			"ul.list", ".left_box", ".article", ".main-content",
		}

		for _, container := range articleContainers {
			doc.Find(container).Each(func(i int, el *goquery.Selection) {
				el.Find("a").Each(func(j int, a *goquery.Selection) {
					href, exists := a.Attr("href")
					if !exists || href == "" {
						return
					}

					if !strings.Contains(href, "javascript:") && !strings.Contains(href, "#") && len(href) > 1 {
						if !strings.HasPrefix(href, "http") {
							if strings.HasPrefix(href, "/") {
								href = c.BaseURL + href
							} else {
								href = c.BaseURL + "/" + href
							}
						}

						if isArticleURL(href) {
							links = append(links, href)
						}
					}
				})

				// NOTE(review): this return only exits the current Each
				// callback, not the surrounding container loop; the real
				// early exit is the break below.
				if len(links) > 0 {
					return
				}
			})

			if len(links) > 0 {
				break
			}
		}
	}

	// 3) Last resort: scan every anchor on the page.
	if len(links) == 0 {
		doc.Find("a").Each(func(i int, a *goquery.Selection) {
			href, exists := a.Attr("href")
			if !exists || href == "" {
				return
			}

			// Keep only links that look like article pages.
			// NOTE(review): unlike steps 1-2, this matches the RAW href
			// before it is made absolute.
			if isArticleURL(href) {
				// Make the URL absolute.
				if !strings.HasPrefix(href, "http") {
					if strings.HasPrefix(href, "/") {
						href = c.BaseURL + href
					} else {
						href = c.BaseURL + "/" + href
					}
				}

				links = append(links, href)
			}
		})
	}

	// Deduplicate while preserving order.
	links = removeDuplicates(links)

	// Find the next-page link, trying selectors from most to least specific.
	paginationSelectors := []string{
		".pagination a:contains('下一页')",
		".pages a:contains('下一页')",
		"a.next", ".next a",
		".pagination a[rel='next']",
		"a:contains('下一页')",
		"a:contains('Next')",
	}

	for _, selector := range paginationSelectors {
		nextLink := doc.Find(selector).First()
		if href, exists := nextLink.Attr("href"); exists && href != "" {
			// Make the URL absolute.
			if !strings.HasPrefix(href, "http") {
				if strings.HasPrefix(href, "/") {
					nextPageURL = c.BaseURL + href
				} else {
					nextPageURL = c.BaseURL + "/" + href
				}
			} else {
				nextPageURL = href
			}
			break
		}
	}

	return links, nextPageURL, nil
}

// Regexes used by isArticleURL, compiled once at package init instead of on
// every call — isArticleURL runs for every anchor on every listing page, and
// regexp.MustCompile in that hot path recompiled both patterns each time.
var (
	// A numeric path segment such as /123/ or /123.html — a common article-ID pattern.
	articleIDRe = regexp.MustCompile(`/\d+(/|\.html|$)`)
	// Date-stamped filenames like /202502093902.html used by this tarot site.
	tarotDatedHTMLRe = regexp.MustCompile(`/\d{6,12}\.html?$`)
)

// isArticleURL reports whether url looks like a link to an article page,
// based on common path fragments, file extensions, and numeric-ID patterns.
func isArticleURL(url string) bool {
	// Common article path fragments and page extensions.
	patterns := []string{
		"/article/", "/post/", "/view/", "/news/", "/blog/",
		"/content/", "/detail/", "/read/", "/info/",
		".html", ".htm", ".php", ".asp", ".aspx",
		"/zhanbu", "/taluo", "/xingzuo", "/yunshi",
	}

	for _, pattern := range patterns {
		if strings.Contains(url, pattern) {
			return true
		}
	}

	// Numeric-ID heuristics (see the package-level regex comments).
	return articleIDRe.MatchString(url) || tarotDatedHTMLRe.MatchString(url)
}

// getArticleData fetches a single article page and extracts its title,
// metadata, images, and body, splitting the body into a question part
// (Content) and an answer part (Answer) when a split marker is found.
// SplitMarker records how (or whether) the split was made.
func (c *Crawler) getArticleData(articleURL string) (*Article, error) {
	article := &Article{
		URL:       articleURL,
		Category:  c.Category,
		CrawlTime: time.Now(),
	}

	// Fetch the detail page.
	res, err := c.Client.Get(articleURL)
	if err != nil {
		return article, err
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return article, fmt.Errorf("status code error: %d %s", res.StatusCode, res.Status)
	}

	// Parse the HTML document.
	doc, err := goquery.NewDocumentFromReader(res.Body)
	if err != nil {
		return article, err
	}

	// Title.
	article.Title = strings.TrimSpace(doc.Find("h1.article-title").Text())

	// Read count.
	article.ReadCount = strings.TrimSpace(doc.Find("div.article-meta span.read-count").Text())

	// Publish date.
	article.PublishDate = strings.TrimSpace(doc.Find("div.article-meta span.publish-time").Text())

	// Article body HTML (last .article-content wins if there are several).
	var articleContent string
	doc.Find("div.article-content").Each(func(i int, s *goquery.Selection) {
		articleContent, _ = s.Html()
	})

	// Collect all image URLs from the body.
	var imageURLs []string
	doc.Find("div.article-content img").Each(func(i int, s *goquery.Selection) {
		if src, exists := s.Attr("src"); exists {
			imageURLs = append(imageURLs, src)
		}
	})
	article.ImageData = imageURLs

	// Re-parse the body HTML for the split logic below.
	doc2, err := goquery.NewDocumentFromReader(strings.NewReader(articleContent))
	if err != nil {
		return article, err
	}

	// 1) Prefer splitting on a known "answer" text marker.
	answerMarkers := []string{"塔罗牌解析", "揭晓答案", "下翻看答案", "答案揭晓", "下拉看答案"}
	foundMarker := false

	for _, marker := range answerMarkers {
		if strings.Contains(articleContent, marker) {
			// SplitN keeps everything after the FIRST occurrence in the
			// answer; plain Split would silently drop any text after a
			// repeated marker (only parts[1] was used before).
			parts := strings.SplitN(articleContent, marker, 2)
			article.Content = strings.TrimSpace(parts[0])
			article.Answer = strings.TrimSpace(parts[1])
			article.SplitMarker = marker
			foundMarker = true
			break
		}
	}

	// 2) Otherwise, try splitting on the last centered <p> tag.
	if !foundMarker {
		// Collect the inner HTML of every centered <p>.
		var centeredPTags []string
		doc2.Find("p").Each(func(i int, s *goquery.Selection) {
			styleAttr, exists := s.Attr("style")
			if exists && strings.Contains(styleAttr, "text-align: center") {
				html, _ := s.Html()
				centeredPTags = append(centeredPTags, html)
			}
		})

		if len(centeredPTags) > 0 {
			// Inner HTML of the last centered <p>.
			lastCenteredPContent := centeredPTags[len(centeredPTags)-1]

			// Reconstruct the full tag as it should appear in the raw body.
			lastCenteredPTag := fmt.Sprintf(`<p style="text-align: center;">%s</p>`, lastCenteredPContent)

			// The reconstruction only works if attribute formatting matches
			// the original markup exactly.
			if strings.Contains(articleContent, lastCenteredPTag) {
				// Split on the first occurrence; everything after it is
				// the answer (SplitN for the same reason as above).
				parts := strings.SplitN(articleContent, lastCenteredPTag, 2)

				if len(parts) > 1 {
					article.Content = strings.TrimSpace(parts[0])
					article.Answer = strings.TrimSpace(parts[1])
					article.SplitMarker = fmt.Sprintf("居中的p标签: %s", lastCenteredPContent)
				} else {
					article.Content = articleContent
					article.Answer = ""
					article.SplitMarker = "无分割(找到标签但未成功分割)"
				}
			} else {
				article.Content = articleContent
				article.Answer = ""
				article.SplitMarker = "无分割(标签构建失败)"
			}
		} else {
			article.Content = articleContent
			article.Answer = ""
			article.SplitMarker = "无分割(未找到任何分割标记)"
		}
	}

	return article, nil
}

// removeDuplicates returns a copy of strSlice with duplicate entries
// removed, preserving the order of each string's first occurrence.
func removeDuplicates(strSlice []string) []string {
	seen := make(map[string]bool, len(strSlice))
	result := []string{}
	for _, s := range strSlice {
		if seen[s] {
			continue
		}
		seen[s] = true
		result = append(result, s)
	}
	return result
}

// SaveToJSON serializes data as indented JSON and writes it to filename.
func SaveToJSON(data *CategoryData, filename string) error {
	encoded, err := json.MarshalIndent(data, "", "  ")
	if err != nil {
		return err
	}
	// World-readable file; overwrites any existing content.
	return os.WriteFile(filename, encoded, 0644)
}

// SaveNoSplitToJSON writes the articles whose body could not be split into
// a standalone JSON file. It is a no-op when there are no such articles.
func (c *Crawler) SaveNoSplitToJSON(filename string) error {
	if len(c.NoSplitRecord) == 0 {
		// Nothing to record; skip writing the file entirely.
		return nil
	}

	// Report shape for the un-split articles file.
	type noSplitReport struct {
		Category      string     `json:"category"`
		BaseURL       string     `json:"base_url"`
		Articles      []*Article `json:"articles"`
		CrawlTime     time.Time  `json:"crawl_time"`
		TotalArticles int        `json:"total_articles"`
	}

	report := noSplitReport{
		Category:      c.Category,
		BaseURL:       c.BaseURL,
		Articles:      c.NoSplitRecord,
		CrawlTime:     time.Now(),
		TotalArticles: len(c.NoSplitRecord),
	}

	encoded, err := json.MarshalIndent(&report, "", "  ")
	if err != nil {
		return err
	}

	return os.WriteFile(filename, encoded, 0644)
}
