import json

import scrapy
import pymongo

class A招聘网Spider(scrapy.Spider):
    """Spider for job.mohrss.gov.cn listing pages.

    Walks the paginated job-info list, extracts the JSON payload embedded
    in the hidden ``findjoblist`` input, and bulk-inserts one document per
    job posting into the local MongoDB collection ``招聘网.data``.
    """

    name = "招聘网"

    # MongoDB handles are created lazily once per spider run (not once per
    # response, which leaked a client connection on every parsed page).
    _client = None
    _coll = None

    async def start(self):
        # Request the first 99 listing pages; the server paginates via pageNo.
        for page in range(1, 100):
            yield scrapy.Request(
                f'http://job.mohrss.gov.cn/cjobs/jobinfolist/listJobinfolist?pageNo={page}'
            )

    def _collection(self):
        """Return the target collection, opening one shared client on first use."""
        if self._coll is None:
            self._client = pymongo.MongoClient('mongodb://localhost:27017/')
            self._coll = self._client['招聘网']["data"]
        return self._coll

    def parse(self, response):
        """Parse one listing page and bulk-insert its jobs into MongoDB.

        The page embeds the job list as JSON in ``<input id="findjoblist">``;
        if that input is absent (layout change, anti-bot page) the response
        is skipped with a warning instead of raising ``TypeError``.
        """
        raw_value = response.xpath('//input[@id="findjoblist"]/@value').get()
        if not raw_value:
            self.logger.warning("no findjoblist payload on %s", response.url)
            return
        job_list = json.loads(raw_value)

        docs = [
            {
                "id": job.get("aab001"),
                "name": job.get("aca112"),
                "地址": job.get("acb202"),
                "score": job.get("acb241"),
            }
            for job in job_list
        ]
        if docs:
            # One bulk insert per page instead of one round-trip per job.
            self._collection().insert_many(docs)

    def closed(self, reason):
        # Scrapy invokes this hook when the spider finishes; release the
        # MongoDB client so the connection pool is shut down cleanly.
        if self._client is not None:
            self._client.close()



