import json
import time

import requests
import scrapy

from bid.items import BidItem
from bid.tools import *


class HebeiSpider(scrapy.Spider):
    """Spider for the Hebei public-resource trading portal (ggzy.hebei.gov.cn).

    Pagination is done synchronously in ``start_requests`` against a JSON
    listing endpoint; each listed entry becomes a ``scrapy.Request`` for its
    detail page, parsed by :meth:`parse`.
    """

    name = 'hebei'
    # allowed_domains entries must be bare domains, not URLs — the original
    # trailing slash ('ggzy.hebei.gov.cn/') never matches, so Scrapy's
    # offsite middleware would reject every request to the site.
    allowed_domains = ['ggzy.hebei.gov.cn']
    start_urls = ['http://ggzy.hebei.gov.cn/']
    # JSON listing endpoint, fetched page by page in start_requests().
    url = 'http://ggzy.hebei.gov.cn/EpointWebBuilderZx/rest/infolist/get'
    # Category code (third-from-last URL path segment) ->
    # [human-readable label, announcement-type flag].
    t_dic = {
        '001002001001': ['政府采购-采购/资审公告', '1'],
        '001002001002': ['政府采购-更正公告', '2'],
        '001002001003': ['政府采购-结果公告', '3'],
        '001002001004': ['政府采购-变更结果公告', '2'],
        '001002001005': ['政府采购-取消中标单位中标资格公告', '2'],
        '001002002001': ['工程建设-招标/资审公告', '1'],
        '001002002002': ['工程建设-澄清/变更公告', '2'],
        '001002002003': ['工程建设-中标候选人公示', '2'],
        '001002002004': ['工程建设-招标结果公告', '3'],
        '001002005003': ['国际招标-招标公告', '1'],
        '001002005004': ['国际招标-变更公告', '2'],
        '001002005005': ['国际招标-结果公告', '3'],
        '001002005006': ['国际招标-结果变更公告', '2'],
        '001002005007': ['国际招标-废标公告', '2'],
        '001002006001': ['其他-交易公告', '1'],
        '001002006003': ['其他-成交公示', '3'],
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
    }

    # Pages on which an already-seen item appeared. Kept for backward
    # compatibility; the stop decision now uses a local flag instead of
    # re-scanning this ever-growing list.
    msg = []

    def start_requests(self):
        """Walk the listing pages and yield one Request per unseen entry.

        Stops when a page comes back empty, or when an entry is already in
        the redis dedupe store / dated 2021 (presumably "crawled on a
        previous run" — the feed is newest-first, so everything after it
        would be old too; confirm against redis_dupefilter's semantics).
        """
        for page in range(1, 999999):
            payload = {
                'cat': '001002',
                'index': str(page),
                'pageSize': '13',
            }
            # Blocking fetch (requests, not Scrapy) so pagination can stop
            # as soon as a duplicate shows up. timeout added so a stalled
            # server cannot hang the crawl forever.
            rows = requests.post(url=self.url, data=payload,
                                 headers=self.headers, timeout=30).json()
            if not rows:
                # Empty page -> walked past the last page of results.
                break
            reached_seen = False
            for row in rows:
                item = {}
                item['link'] = 'http://ggzy.hebei.gov.cn/hbjyzx' + row['infourl']
                # Category code is the third path segment from the end.
                key = item['link'].split('/')[-3]
                if key not in self.t_dic:
                    # Unknown category -> not a listing type we handle.
                    continue
                item['title'] = row['title']
                item['time'] = row['infodate']
                item['classification'] = '河北-' + self.t_dic[key][0]
                item['typ'] = self.t_dic[key][-1]
                if redis_dupefilter(item) or item['time'].startswith('2021'):
                    print('重复')
                    self.msg.append(page)
                    reached_seen = True
                    break
                yield scrapy.Request(url=item['link'], callback=self.parse,
                                     meta={'item': item})
            if reached_seen:
                print('完成')
                break
            # Crude politeness delay, proportional to the page size.
            time.sleep(len(rows))

    def parse(self, response):
        """Fill in the detail-page content and emit the finished item."""
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(response.text, '//div[@class="ewb-con-p"]')
        item = get_field(dict(item))
        yield item