import json
import math
import time

import requests
import scrapy

from bid.items import BidItem
from bid.tools import *


class GuangdongSpider(scrapy.Spider):
    """Spider for Guangdong public-resource trading announcements (gdzwfw.gov.cn).

    Walks the site's JSON listing API page by page, filters announcements by
    category code, and follows each detail page to extract its content into a
    ``BidItem``.
    """
    name = 'guangdong'
    # BUG FIX: allowed_domains entries must be bare domains. The original
    # value had a trailing slash ('www.gdzwfw.gov.cn/'), which Scrapy's
    # OffsiteMiddleware rejects as a URL-like entry and ignores — effectively
    # disabling offsite filtering for this spider.
    allowed_domains = ['www.gdzwfw.gov.cn']
    start_urls = ['http://www.gdzwfw.gov.cn/']
    # JSON search endpoint returning paginated announcement listings.
    url = 'https://www.gdzwfw.gov.cn/ggzy/ggzy-admin/rest/searchAction/getListjy'
    # Category code (3rd-from-last URL path segment) ->
    # [human-readable classification label, announcement type code].
    t_dic ={
        '002001001001':['房屋建筑和市政基础设施工程-资格预审公告','2'],
        '002001001002':['房屋建筑和市政基础设施工程-招标公告','1'],
        '002001001006':['房屋建筑和市政基础设施工程-中标结果公告','3'],
        '002001001007':['房屋建筑和市政基础设施工程-其他','2'],
        '002001001008':['房屋建筑和市政基础设施工程-中标候选人公示','2'],
        '002001002001':['交通运输工程-资格预审公告','2'],
        '002001002002':['交通运输工程-招标公告','1'],
        '002001002006':['交通运输工程-中标结果公告','3'],
        '002001002007':['交通运输工程-其他','2'],
        '002001002008':['交通运输工程-中标候选人公示','2'],
        '002001003001':['水利工程-资格预审公告','1'],
        '002001003002':['水利工程-招标公告','1'],
        '002001003006':['水利工程-中标结果公告','3'],
        '002001003007':['水利工程-其他','2'],
        '002001003008':['水利工程-中标候选人公示','3'],
        '002001004001':['其他工程-资格预审公告','2'],
        '002001004002':['其他工程-招标公告','1'],
        '002001004006':['其他工程-中标结果公告','3'],
        '002001004007':['其他工程-其他','2'],
        '002001004008':['其他工程-中标候选人公示','2'],
        '002004001001':['政府集中采购-采购（资质预审）公告','2'],
        '002004001002':['政府集中采购-更正公告','2'],
        '002004001003':['政府集中采购-中标（成交）结果公告','3'],
        '002004002001':['政府分散采购-采购（资质预审）公告','2'],
        '002004002002':['政府分散采购-更正公告','2'],
        '002004002003':['政府分散采购-中标（成交）结果公告','3'],
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Pages on which a duplicate / pre-2022 item was encountered; checked
    # after each page to decide whether to stop paginating. (Class-level
    # mutable state — fine for one spider instance per process.)
    msg = []

    def start_requests(self):
        """Iterate the listing API and yield one Request per new announcement.

        Pagination stops when either the server-reported last page is
        exceeded, or a page yields an item that is already known to the
        redis dedup filter / dated before 2022 (listings are assumed to be
        in reverse-chronological order — TODO confirm).
        """
        for page in range(1, 999999):
            # The API expects the whole query as a JSON string in 'params'.
            # Payload kept byte-identical to the original request.
            data = {
                'params': '{"title":"","pageSize":10,"pageIndex":%s,"siteGuid":"7eb5f7f1-9041-43ad-8e13-8fcb82ea831a","categorynum":"002","categorynum1":"","categorynum2":"","categorynum3":"","xiaqu":"","sdt":"","edt":""}' % page
            }
            # NOTE(review): blocking requests.post inside a Scrapy spider
            # bypasses Scrapy's scheduler/throttling; kept as-is since the
            # surrounding pipeline depends on this sequential behavior.
            res = requests.post(url=self.url, data=data, headers=self.headers).json()
            ls = res['infodata']
            # 10 items per page (matches pageSize in the payload above).
            last_page = math.ceil(res['TotalCount'] / 10)
            if page > last_page:
                break
            for l in ls:
                item = {}
                item['link'] = 'http://www.gdzwfw.gov.cn' + l['infourl']
                # Third-from-last path segment of the detail URL is the
                # category code; skip categories we do not track.
                key = item['link'].split('/')[-3]
                if key not in self.t_dic:
                    continue
                item['title'] = l['title']
                item['time'] = l['infodate']
                item['classification'] = '广东-' + self.t_dic[key][0]
                item['typ'] = self.t_dic[key][-1]
                # Stop once we hit an already-seen item or one from 2021
                # (incremental crawl boundary).
                if redis_dupefilter(item) or item['time'].startswith('2021'):
                    self.msg.append(page)
                    break
                yield scrapy.Request(url=item['link'], callback=self.parse, meta={'item': item})
            if page in self.msg:
                print('完成')
                break
            # Crude politeness delay proportional to the page size.
            time.sleep(len(ls))

    def parse(self, response):
        """Extract the announcement body from a detail page.

        Relative image sources are rewritten to absolute URLs before the
        content is pulled from the ``news_content`` div.
        """
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(
            response.text.replace('src="/', 'src="http://www.gdzwfw.gov.cn/'),
            '//div[@class="news_content"]')
        item = get_field(dict(item))
        yield item