package page_processer

import (
	"bytes"
	"strings"

	"dense_spider/core/common/page"
	"dense_spider/core/common/util"
	"dense_spider/core/spider"
	"dense_spider/tools/xpath"
)

// HtmlPageProcesser processes downloaded HTML pages: it discovers
// outgoing links to enqueue and applies the spider's extract rules.
// The zero value is ready to use.
type HtmlPageProcesser struct{}

// NewHtmlPageProcesser returns a ready-to-use HtmlPageProcesser.
func NewHtmlPageProcesser() *HtmlPageProcesser {
	return new(HtmlPageProcesser)
}
// Process handles one fetched page:
//  1. Skips pages whose download failed or whose body is empty.
//  2. While the request is still below the spider's maximum crawl
//     depth, collects every <a href> and <img src> value, normalizes
//     it against the page URL, enqueues it at depth+1, and rewrites
//     the body text to use the absolute form.
//  3. Applies each configured extract rule (an XPath expression) and
//     stores the matched text as a named field on the page.
//
// XPath compilation/parse failures panic, matching the error style
// used elsewhere in this file.
func (*HtmlPageProcesser) Process(sp *spider.Spider, p *page.Page) {
	if !p.IsSucc() {
		// Best-effort diagnostics only; failed pages are dropped.
		println(p.Errormsg())
		return
	}

	body := p.GetBodyStr()
	if body == "" {
		return
	}
	node, err := xpath.ParseHTML(bytes.NewBufferString(body))
	if err != nil {
		panic(err)
	}

	// Only harvest child links while we are still below the maximum
	// crawl depth; discovered URLs are enqueued one level deeper.
	if p.GetRequest().GetDepth() < sp.GetMaxCrawlDepth() {
		childDepth := p.GetRequest().GetDepth() + 1
		baseUrl := p.GetRequest().GetUrl()

		// harvest enqueues every URL matched by expr and rewrites the
		// raw link text in body to its normalized (absolute) form.
		// <a href> and <img src> are handled identically.
		harvest := func(expr string) {
			compiled, err := xpath.Compile(expr)
			if err != nil {
				panic(err)
			}
			iter := compiled.Iter(node)
			for iter.Next() {
				raw := iter.Node().String()
				abs := util.NormalizeUrl(raw, baseUrl)
				if abs == "" {
					// Unresolvable/unsupported link; skip it.
					continue
				}
				sp.AppendUrl(abs, childDepth)
				body = strings.Replace(body, raw, abs, -1)
			}
		}
		harvest("//a/@href")
		harvest("//img/@src")
		// NOTE(review): the rewritten body is never stored back on p,
		// so the absolutized links are discarded after this method
		// returns — confirm whether a p.SetBodyStr(body) (or
		// equivalent) was intended here.
	}

	// len() of a nil slice is 0, so one length-free range suffices;
	// the rules slice is fetched once instead of on every iteration.
	for _, rule := range sp.GetExtractRules() {
		compiled, err := xpath.Compile(rule.GetXpath())
		if err != nil {
			panic(err)
		}
		if value, ok := compiled.String(node); ok {
			p.AddField(rule.GetName(), value)
		}
	}
}
