package parser

import (
	. "01-crawler-project/02/engine"
	"01-crawler-project/02/fetcher"
	"log"
	"net/http"
	"regexp"
)

// regex_userinfo_link matches anchor tags linking to a single user's profile
// page; capture group 1 is the profile URL, group 2 the displayed name.
var regex_userinfo_link = regexp.MustCompile(`<a href="(http://album.zhenai.com/u/\d+)" target="_blank">([^<]+[^>])</a>`)

// regex_next_link matches the "next page" (下一页) pagination link on the
// Chengdu city listing; capture group 1 is the next page's URL.
var regex_next_link = regexp.MustCompile(`<a href="(http://www.zhenai.com/zhenghun/chengdu/\d+)">下一页</a>`)

// ParseCityUserInfo parses a city listing page: it collects the profile
// requests found on the given page, then follows the "next page" link
// synchronously, accumulating the results of every page into one
// ParserResult. Pagination stops when no next-page link is found or a
// page cannot be fetched.
func ParseCityUserInfo(contents []byte) ParserResult {

	// Parse the profile links on the first page.
	result := parseUserinfoLink(string(contents))

	// Look for the "next page" link on the first page.
	nextLink := regex_next_link.FindAllStringSubmatch(string(contents), -1)

	// Follow pagination; each iteration fetches one page and re-evaluates
	// the next-page link from that page's content.
	// NOTE: len > 1 because index 1 (the captured URL) is accessed below.
	for len(nextLink) > 0 && len(nextLink[0]) > 1 {

		request, err := http.NewRequest(http.MethodGet, nextLink[0][1], nil)
		if err != nil {
			// BUG FIX: the error was previously discarded with `_`.
			log.Println("error : ", err)
			break
		}

		bytes, err := fetcher.Fetcher(request)
		if err != nil {
			// BUG FIX: the original `continue` skipped the re-match of the
			// next-page link, retrying the same failing URL forever.
			// Stop pagination instead and return what was collected so far.
			log.Println("error : ", err)
			break
		}

		parserResult := parseUserinfoLink(string(bytes))

		// BUG FIX: the original appended parserResult.Items as a single
		// nested element (missing `...`), unlike the Requests line below.
		result.Items = append(result.Items, parserResult.Items...)
		result.Requests = append(result.Requests, parserResult.Requests...)

		nextLink = regex_next_link.FindAllStringSubmatch(string(bytes), -1)
	}

	return result
}

// parseUserinfoLink extracts every user-profile link from the listing HTML
// and builds one crawl Request per link, each to be parsed by ParseUserInfo.
// Links for which an *http.Request cannot be built are logged and skipped.
func parseUserinfoLink(contents string) (result ParserResult) {

	// contents is already a string; no conversion needed (the original
	// re-wrapped it in string()).
	matches := regex_userinfo_link.FindAllStringSubmatch(contents, -1)

	// Pre-size by capacity and append, so a failed NewRequest can be
	// skipped without leaving a zero-valued Request in the slice (the
	// original pre-sized by length and discarded the error with `_`).
	result.Requests = make([]Request, 0, len(matches))

	for _, match := range matches {

		// match[1] is the profile URL; match[2] (the display name) is
		// currently unused — see the commented-out Items code below.
		request, err := http.NewRequest(http.MethodGet, match[1], nil)
		if err != nil {
			log.Println("error : ", err)
			continue
		}

		result.Requests = append(result.Requests, Request{HttpRequest: request, ParserFunc: ParseUserInfo})

		//result.Items = append(result.Items, match[2])
	}

	return result
}
