import json
import time

import requests
import scrapy

from bid.items import BidItem
from bid.tools import *


class GuangxiSpider(scrapy.Spider):
    """Spider for the Guangxi public-resource trading portal (gxggzy.gxzf.gov.cn).

    ``start_requests`` pages through the site's JSON search API, keeps only
    announcements whose channel id appears in :attr:`t_dic`, and yields a
    request per detail page; ``parse`` extracts the announcement body.
    """

    name = 'guangxi'
    # Entries in allowed_domains must be bare domain names; a trailing slash
    # (as in the original 'gxggzy.gxzf.gov.cn/') breaks Scrapy's offsite
    # filtering because it can never match a request's hostname.
    allowed_domains = ['gxggzy.gxzf.gov.cn']
    start_urls = ['http://gxggzy.gxzf.gov.cn/']
    # Search-API URL template; ``{}`` is filled with the 1-based page number.
    url = 'http://gxggzy.gxzf.gov.cn/igs/front/search/list.html?&filter%5BDOCTITLE%5D=&pageNumber={}&pageSize=10&index=gxggzy_jyfw&type=jyfw&filter%5Bparentparentid%5D=&filter%5Bparentchnldesc%5D=&filter%5Bchnldesc%5D=&filter%5BSITEID%5D=234&orderProperty=PUBDATE&orderDirection=desc&filter%5BAVAILABLE%5D=true'
    # CHANNELID -> [human-readable category label, announcement-type code].
    # Type codes (inferred from values — TODO confirm against downstream use):
    # '0' pre-announcement, '1' tender/purchase notice, '2' change/ceiling-price
    # notice or candidate publicity, '3' award result.
    t_dic = {
        43195: ['铁路工程-招标公告', '1'],
        43196: ['铁路工程-澄清变更', '2'],
        43198: ['铁路工程-上限价', '2'],
        43200: ['铁路工程-中标候选人公示', '2'],
        43201: ['铁路工程-中标结果人公示', '3'],
        43205: ['房建市政-招标公告', '1'],
        43206: ['房建市政-澄清变更', '2'],
        43208: ['房建市政-上限价', '2'],
        43210: ['房建市政-中标候选人公示', '2'],
        43211: ['房建市政-中标结果人公示', '3'],
        43222: ['交通工程-交易公告', '1'],
        43223: ['交通工程-澄清变更', '2'],
        43225: ['交通工程-上限价', '2'],
        43226: ['交通工程-中标候选人公示', '2'],
        43228: ['交通工程-中标结果人公示', '3'],
        43231: ['水利工程-交易公告', '1'],
        43233: ['水利工程-澄清变更', '2'],
        99464: ['水利工程-上限价', '2'],
        43235: ['水利工程-中标候选人公示', '2'],
        43236: ['水利工程-中标结果人公示', '3'],
        84131: ['政府采购（公开招标）-预公示', '0'],
        84132: ['政府采购（公开招标）-采购公告', '1'],
        84133: ['政府采购（公开招标）-更正公告', '2'],
        84134: ['政府采购（公开招标）-中标公告', '3'],
        84142: ['政府采购（竞争性磋商）-采购公告', '1'],
        84143: ['政府采购（竞争性磋商）-更改公告', '2'],
        84144: ['政府采购（竞争性磋商）-中标公告', '3'],
        84147: ['政府采购（竞争性谈判）-采购公告', '1'],
        84148: ['政府采购（竞争性谈判）-更改公告', '2'],
        84149: ['政府采购（竞争性谈判）-中标公告', '3'],
        84157: ['政府采购（单一来源）-采购公告', '1'],
        84158: ['政府采购（单一来源）-更改公告', '2'],
        84159: ['政府采购（单一来源）-中标公告', '3'],
        84104: ['药械采购（药品）-通知公告', '1'],
        84108: ['药械采购（医疗耗材）-通知公告', '1'],
        84110: ['药械采购（疫苗）-通知公告', '1'],
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Pages on which a duplicate (or 2021-dated) item was seen; used as the
    # stop signal for the paging loop.  NOTE(review): class-level mutable —
    # shared across instances, which is fine for a single-run spider.
    msg = []

    def start_requests(self):
        """Walk the JSON list API page by page and yield detail-page requests.

        Paging stops when the API stops returning a parsable
        ``page.content`` payload, or when a page yields an item that is
        already in the Redis dupe filter / was published in 2021 (the
        backfill boundary — TODO confirm this cutoff is still wanted).
        """
        for page in range(1, 999999):
            # Synchronous fetch of the listing JSON; only the detail pages go
            # through Scrapy's scheduler.
            res = requests.get(url=self.url.format(page), headers=self.headers)
            try:
                ls = res.json()['page']['content']
            except (ValueError, KeyError, TypeError):
                # Non-JSON response or unexpected payload shape: treat as
                # "no more pages".  (Was a bare ``except:``, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                break
            time.sleep(1)
            for l in ls:
                item = {}
                item['link'] = l['DOCPUBURL']
                key = l['CHANNELID']
                # Skip channels we are not interested in.
                if key not in self.t_dic:
                    continue
                item['title'] = l['DOCTITLE']
                item['time'] = l['PUBDATE'][:10]
                item['classification'] = '广西-' + self.t_dic[key][0]
                item['typ'] = self.t_dic[key][-1]
                # Results are newest-first, so the first duplicate / 2021
                # item means everything after it is already collected.
                if redis_dupefilter(item) or item['time'].startswith('2021'):
                    self.msg.append(page)
                    break
                yield scrapy.Request(url=item['link'], callback=self.parse, meta={'item': item})
            if page in self.msg:
                print('完成')
                break
            # Throttle proportionally to the number of requests just issued.
            time.sleep(len(ls))

    def parse(self, response):
        """Build a BidItem from a detail page and the listing metadata."""
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(response.text, '//div[@class="ewb-details-info"]')
        # get_field post-processes/normalizes the fields into the final dict.
        item = get_field(dict(item))
        yield item