package parser

import (
	"fmt"
	"learn/crawler/engine"
	"learn/crawler/model"
	"learn/crawler/util"
	"regexp"
)

// listRe matches one <li> job entry on a Lagou listing page and captures,
// in order: position id, salary, company name, and position name from the
// li element's data-* attributes.
var listRe = regexp.MustCompile(`<li class="con_list_item default_list" data-index="[\d]+" data-positionid="([\d]+)" data-salary="([^"]+)" data-company="([^"]+)" data-positionname="([^"]+)"[^>]*>[\s\S]*?</li>`)

// addrRe captures the job's address from the bracketed <em> inside the "add" span.
var addrRe = regexp.MustCompile(`<span class="add">\[<em>([^<]+)</em>\]</span>`)

// timeRe captures the posting time from the "format-time" span.
var timeRe = regexp.MustCompile(`<span class="format-time">([^<]+)</span>`)

// workerTimeRe captures the text that immediately follows the salary span
// (presumably the experience/education requirement — TODO confirm against a
// live page sample).
var workerTimeRe = regexp.MustCompile(`<span class="money">[^<]+</span><!--<i></i>-->([^<]+)`)

// pageRe captures the URL of the "next page" (下一页) pagination link.
var pageRe = regexp.MustCompile(`<a href="(https://www.lagou.com/zhaopin/[^"]+)" class="page_no" data-index="[\d]+">下一页</a>`)

// SNO is a site-local prefix added to every item id so that ids scraped
// from different websites do not collide with each other.
const SNO = "10000"

// JobListParse extracts job postings from a Lagou job-list HTML page.
// Every <li> entry matched by listRe becomes one engine.Item whose Payload
// is a model.Job; when a "next page" link is present, a follow-up
// engine.Request using this same parser is queued in Requests so the whole
// listing gets crawled.
func JobListParse(contents []byte) engine.ParseResult {
	matches := listRe.FindAllSubmatch(contents, -1)

	var result engine.ParseResult
	if n := len(matches); n > 0 {
		// Pre-size: exactly one item per matched listing, so avoid
		// repeated append growth. (Left nil when there are no matches to
		// preserve the original nil-slice semantics.)
		result.Item = make([]engine.Item, 0, n)
	}

	for _, m := range matches {
		// m[0] is the whole <li> block; sub-captures per listRe:
		// m[1]=position id, m[2]=salary, m[3]=company, m[4]=position name.
		c := m[0]

		job := model.Job{
			Name:    string(m[4]),
			Salary:  string(m[2]),
			Company: string(m[3]),
			Address: util.ExtractString(c, addrRe),
			Time:    util.ExtractString(c, timeRe),
		}

		// The text after the salary span may be absent, so guard the capture.
		if wt := workerTimeRe.FindSubmatch(c); len(wt) >= 2 {
			job.WorkTime = string(wt[1])
		}

		url := fmt.Sprintf("https://www.lagou.com/jobs/%s.html", m[1])

		// Prefix the site-local position id with SNO so ids stay unique
		// across the multiple sites this crawler covers.
		result.Item = append(result.Item, engine.Item{
			Id:      SNO + string(m[1]),
			Url:     url,
			Type:    "jobs",
			Form:    "拉勾网",
			Payload: job,
		})
	}

	// Parse the "next page" link and queue it as a Request handled by this
	// same parser.
	if pMatch := pageRe.FindSubmatch(contents); len(pMatch) >= 2 {
		result.Requests = append(result.Requests, engine.Request{
			Url:        string(pMatch[1]),
			ParserFunc: JobListParse,
		})
	}

	return result
}
