package main

import (
	"fmt"
	"my-crawler/concurrence/engine"
	"my-crawler/concurrence/fetcher"
	"my-crawler/concurrence/parser"
	"my-crawler/concurrence/task"
	"my-crawler/concurrence/task/get_gzh_articles/dao"
)

// main is the concurrent version of the crawler. It wires up a worker-pool
// engine with a persistence channel and a scheduler, then seeds it with the
// initial request against the WeChat official-account article-list API.
func main() {
	saver := dao.SaveChannel()
	eng := engine.ConcurrenceEngine{
		WorkerCount: 10,
		SaverChan:   saver,
		Scheduler:   &engine.ConcurrenceScheduler{},
	}

	const apiURL = "https://mp.weixin.qq.com/cgi-bin/appmsg"
	payload := map[string]string{
		"action": "list_ex",
		"begin":  fmt.Sprintf("%d", task.Begin), // goes up to 245, i.e. page 50
		"count":  "5",
		"fakeid": task.Fakeid,
		"type":   "9",
		"token":  task.Token,
		"lang":   "zh_CN",
		"f":      "json",
		"ajax":   "1",
	}
	// The Cookie has to be refreshed per session (see task.Cookie).
	headers := map[string]string{
		"Cookie": task.Cookie,
	}

	// Seed the engine: initial request data, HTTP GET fetcher, JSON parser.
	eng.Initial(engine.CrawlerRequest{
		ReqData:  engine.CreateReqData(headers, payload, apiURL),
		HttpFunc: &fetcher.GetReq{},
		Parser:   parser.ApiJsonParser,
	})
}

// initReqData assembles an engine.ReqData from the given header map,
// request payload, and target URL.
//
// NOTE(review): this helper appears unused — main builds its request via
// engine.CreateReqData instead. Confirm whether one of the two should be
// removed or consolidated.
func initReqData(header, payload map[string]string, url string) engine.ReqData {
	data := engine.ReqData{
		Header:  header,
		Payload: payload,
		Url:     url,
	}
	return data
}
