package rankCore

import (
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

/*
	将SPONSORED的排名算在一起
 */

// SinglePageRes holds the outcome of scanning one search-result page.
type SinglePageRes struct {
	itemNumber    string // eBay item id being searched for
	keyword       string // search keyword that produced this page
	normalRank    int    // 1-based organic position on this page; -1 when absent
	sponsoredRank int    // 1-based SPONSORED position on this page; -1 when absent
	totalNum      int    // total number of listings rendered on this page
	currPageNum   int    // 1-based page number this result belongs to
}
// FinalRank is the final ranking result for one (item, keyword) pair; the
// rank controller returns these to the browser via AllFinalRankData.
type FinalRank struct {
	ItemNumber      string // eBay item id
	Keyword         string // search keyword
	Title           string // listing title scraped from the item page
	ImgUrl          string // main image URL scraped from the item page
	NormalRank      int    // absolute organic rank across all pages; 0 when not found
	PageOfNormal    int    // page number of the organic listing; 0 when not found
	SponsoredRank   int    // absolute SPONSORED rank across all pages; 0 when not found
	PageOfSponsored int    // page number of the SPONSORED listing; 0 when not found
}

//var AllFinalRankData map[string][]*FinalRank
// AllFinalRankData collects the final ranking of every (item, keyword) pair;
// the rank controller reads it directly to build the browser response.
// NOTE(review): appended to from DoSingleKeywordWork — if several keyword
// jobs run concurrently this is a data race; guard with a mutex.
var AllFinalRankData []*FinalRank

// getImgAndTitle fetches the eBay listing page for itemNumber and extracts
// the main image URL (src of #icImg) and the listing title (inner HTML of
// #itemTitle with its leading <span> label stripped).
// On any failure the corresponding return value is left empty.
func getImgAndTitle(itemNumber string) (imgUrl, title string) {
	url := "https://ebay.com/itm/" + itemNumber
	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("httpGetItemImgAndTitle Err:", err)
		return
	}
	defer resp.Body.Close()

	// Stream the body straight into goquery instead of accumulating it
	// into a string with repeated (quadratic) concatenation.
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		fmt.Println("goquery parse Err:", err)
		return
	}
	imgUrl, exists := doc.Find("#icImg").Attr("src")
	if !exists {
		fmt.Println("cannot find imgUrl of ", itemNumber)
	}
	// #itemTitle carries a label <span> before the real title text; strip
	// the whole span so only the plain title remains.
	title, _ = doc.Find("#itemTitle").Html()
	reg := regexp.MustCompile(`<span[\d\D]*</span>`)
	title = reg.ReplaceAllString(title, "")
	fmt.Println("title:", title)
	return
}
/*
HandleResult parses one raw search-result page (result) and determines
whether itemNumber appears on it and at which position.

Returns the 1-based position of the organic listing (normalRank) and the
SPONSORED listing (sponsoredRank) on this page — -1 when absent — plus the
total number of listings on the page (totalNum).
*/
func HandleResult(result string, itemNumber string) (normalRank, sponsoredRank, totalNum int) {
	normalRank = -1
	sponsoredRank = -1
	itemNumberString := "listingId=\"" + itemNumber + "\""
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(result))
	if err != nil {
		fmt.Println("goquery parse Err:", err)
		return
	}
	listings := doc.Find("li[listingId]")
	totalNum = listings.Length()
	// Cheap pre-check: skip the per-listing scan when the item id does not
	// occur anywhere on the page.
	if !strings.Contains(result, itemNumberString) {
		return
	}
	// Compile once, outside the Each callback (the original recompiled it
	// for every listing).
	iidReg := regexp.MustCompile(`iid="(\d+)"`)
	listings.Each(func(i int, selection *goquery.Selection) {
		htmlStr, err := selection.Html()
		if err != nil {
			fmt.Println("selection.html Err:", err)
			return
		}
		m := iidReg.FindStringSubmatch(htmlStr)
		if m == nil {
			// Listing without an iid attribute — the original indexed
			// res[0][1] unconditionally and would panic here.
			return
		}
		if m[1] == itemNumber { // found the item on this page
			// A SPONSORED badge marks a promoted (paid) placement.
			if strings.Contains(htmlStr, "<div class=\"promoted-lv\"><span>SPONSORED</span></div>") {
				sponsoredRank = i + 1
			} else {
				normalRank = i + 1
			}
		}
	})
	return
}
/*
SpiderOneUrl fetches one search-result page (page i) and reports the item's
ranking on it through chSinglePageRes.

A result is ALWAYS sent on the channel — even when the HTTP request or body
read fails (ranks stay -1) — otherwise the receive loop in
DoSingleKeywordWork, which expects exactly maxPage results, would block
forever. (The original returned early on error without sending: deadlock.)

keyword is unused here but kept for signature compatibility with callers.
*/
func SpiderOneUrl(url, itemNumber string, i int, keyword string, chSinglePageRes chan *SinglePageRes, singlePageRes *SinglePageRes) {
	// Pre-fill with "not found" values so an early error return still
	// carries sensible data; the deferred send guarantees delivery.
	singlePageRes.normalRank = -1
	singlePageRes.sponsoredRank = -1
	singlePageRes.totalNum = 0
	singlePageRes.currPageNum = i
	defer func() { chSinglePageRes <- singlePageRes }()

	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("http.Get Err:", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read body Err:", err)
		return
	}
	normalRank, sponsoredRank, totalNumber := HandleResult(string(body), itemNumber)
	singlePageRes.normalRank = normalRank
	singlePageRes.sponsoredRank = sponsoredRank
	singlePageRes.totalNum = totalNumber
}

/*
calcFinalRank turns the per-page results into a single absolute rank for
the keyword: the sum of the listing counts of every page before the page
the item appeared on, plus the item's in-page position on that page.
A page number of 0 means "not found" and leaves the matching rank at 0.
*/
func calcFinalRank(finalRank *FinalRank, allPageData map[int]*SinglePageRes, pageNumberOfNormalRank, pageNumOfSponsoredRank int) {
	finalRank.NormalRank = 0
	finalRank.PageOfNormal = 0
	finalRank.SponsoredRank = 0
	finalRank.PageOfSponsored = 0
	if pageNumberOfNormalRank == 0 && pageNumOfSponsoredRank == 0 {
		return // item never appeared, neither organic nor sponsored
	}
	finalRank.PageOfNormal = pageNumberOfNormalRank
	finalRank.PageOfSponsored = pageNumOfSponsoredRank

	// absRank sums the listings on every page before targetPage and adds
	// the in-page position (selected by pick) on targetPage itself.
	absRank := func(targetPage int, pick func(*SinglePageRes) int) int {
		if targetPage == 0 {
			return 0
		}
		sum := 0
		for pageNum, page := range allPageData {
			switch {
			case pageNum < targetPage:
				sum += page.totalNum
			case pageNum == targetPage:
				sum += pick(page)
			}
		}
		return sum
	}
	finalRank.NormalRank = absRank(pageNumberOfNormalRank, func(p *SinglePageRes) int { return p.normalRank })
	finalRank.SponsoredRank = absRank(pageNumOfSponsoredRank, func(p *SinglePageRes) int { return p.sponsoredRank })
}
/*
DoSingleKeywordWork runs the complete ranking job for one keyword: it
spawns one goroutine per result page (up to maxPage, numberPerPage items
each), collects every page's result, computes the keyword's absolute rank,
appends it to AllFinalRankData, and finally signals completion on ch.
*/
func DoSingleKeywordWork(keyword string, maxPage, numberPerPage int, itemNumber string, ch chan int) {
	pageNumOfNormalRank := 0
	pageNumOfSponsoredRank := 0
	allPageRes := make(map[int]*SinglePageRes)
	chSinglePageRes := make(chan *SinglePageRes)
	// eBay search URLs encode spaces in _nkw as '+'. (Replace is a no-op
	// when there is no space, so no Contains pre-check is needed.)
	newKeyword := strings.Replace(keyword, " ", "+", -1)
	preUrl := "https://www.ebay.co.uk/sch/i.html?_from=R40&_fcid=3&_clu=2&_localstpos=LE38DX&_stpos=LE38DX&gbr=1"
	for i := 1; i <= maxPage; i++ {
		url := preUrl + "&_nkw=" + newKeyword + "&_pgn=" + strconv.Itoa(i) + "&_ipg=" + strconv.Itoa(numberPerPage)
		fmt.Printf("开始爬取关键词(%s)的第%d页:\n", keyword, i)
		singlePageRes := &SinglePageRes{
			keyword:    keyword,
			itemNumber: itemNumber,
		}
		go SpiderOneUrl(url, itemNumber, i, keyword, chSinglePageRes, singlePageRes)
	}

	// Collect exactly maxPage results; SpiderOneUrl must send one result
	// per page or this loop deadlocks.
	for j := 1; j <= maxPage; j++ {
		singlePageData := <-chSinglePageRes
		currPageNum := singlePageData.currPageNum
		if singlePageData.normalRank > 0 {
			pageNumOfNormalRank = currPageNum
		}
		if singlePageData.sponsoredRank > 0 {
			pageNumOfSponsoredRank = currPageNum
		}
		allPageRes[currPageNum] = singlePageData
	}

	finalRank := &FinalRank{
		ItemNumber: itemNumber,
		Keyword:    keyword,
	}
	finalRank.ImgUrl, finalRank.Title = getImgAndTitle(itemNumber)

	calcFinalRank(finalRank, allPageRes, pageNumOfNormalRank, pageNumOfSponsoredRank)

	// NOTE(review): AllFinalRankData is package-level mutable state. If
	// several DoSingleKeywordWork goroutines run concurrently (the ch
	// completion channel suggests they do), this append is a data race —
	// protect it with a mutex.
	AllFinalRankData = append(AllFinalRankData, finalRank)

	ch <- 1
}
