package parser

import (
	"encoding/json"
	"fmt"
	"log"
	"my-crawler/concurrence/engine"
	"my-crawler/concurrence/fetcher"
	"my-crawler/concurrence/task/gzh_comments_num/dao"
	"my-crawler/concurrence/task/gzh_comments_num/models"
	"time"
)

// GetCommentsNumParser is step 2 of the pipeline: it parses the JSON
// response carrying the comment count for one article, emits the result
// for persistence when the count is positive, and enqueues the request
// for the next article.
func GetCommentsNumParser(content []byte, reqData engine.ReqData) engine.ParserResult {
	// Throttle: one request per second against the remote endpoint.
	time.Sleep(1 * time.Second)
	fmt.Println(string(content))

	res := engine.ParserResult{}

	// Decode the JSON payload. A single malformed response must not kill
	// the whole crawler process (the previous code called log.Fatal here),
	// so log the error and skip this item instead.
	var result models.Result
	if err := json.Unmarshal(content, &result); err != nil {
		log.Printf("GetCommentsNumParser: unmarshal response: %v", err)
		return res
	}

	// The id carried over in the request payload identifies the current
	// row; dao.GetModels(id) builds the request for the next row (> id),
	// so no increment is needed here.
	id, ok := reqData.Payload["id"]
	if !ok {
		log.Println(`GetCommentsNumParser: payload missing "id"`)
		return res
	}

	// Only rows that actually have comments are worth persisting.
	if result.TotalNums > 0 {
		result.ID = id
		fmt.Println("TotalNums:", result.TotalNums)
		res.Item = append(res.Item, result)
	}

	// Enqueue the follow-up request for the next article regardless of
	// whether the current one had comments.
	nextData, header, reqURL := dao.GetModels(id)
	res.Request = append(res.Request, engine.CrawlerRequest{
		ReqData:  engine.CreateReqData(header, nextData, reqURL), // initialize request data
		HttpFunc: &fetcher.GetReq{},                              // HTTP GET fetcher
		Parser:   GetCommentIdParser,
	})
	return res
}
