import scrapy
import requests,json,math
from bid.items import BidItem
from bid.tools import *


class JiangsuSpider(scrapy.Spider):
    """Spider for the Jiangsu public-resource trading portal (jsggzy.jszwfw.gov.cn).

    Pages through the site's full-text search API (a POST endpoint) using
    blocking ``requests`` calls inside ``start_requests``, filters records to
    known announcement categories, and yields a Scrapy request per detail page.

    NOTE(review): doing synchronous ``requests`` + ``time.sleep`` inside a
    Scrapy spider blocks the reactor; kept as-is since the incremental-stop
    logic depends on sequential paging.
    """
    name = 'jiangsu'
    # Bare domain only — Scrapy's offsite middleware rejects URLs/paths here,
    # so the original trailing slash ('jsggzy.jszwfw.gov.cn/') was a bug.
    allowed_domains = ['jsggzy.jszwfw.gov.cn']
    start_urls = ['http://jsggzy.jszwfw.gov.cn/']
    # Full-text search API endpoint (POST with a JSON string body).
    url = 'http://jsggzy.jszwfw.gov.cn/inteligentsearch/rest/esinteligentsearch/getFullTextDataNew'
    # Maps the category code embedded in a record's linkurl (third path segment
    # from the end) to [human-readable classification, type code].
    t_dic = {
        '003001001': ['建设工程-招标公告/资审公告', '1'],
        '003001006': ['建设工程-未入围公示', '2'],
        '003001005': ['建设工程-最高限价公示', '2'],
        '003001007': ['建设工程-中标候选人公示', '2'],
        '003001008': ['建设工程-中标结果公示', '3'],
        '003002001': ['交通工程-招标公告', '1'],
        '003002003': ['交通工程-中标候选人公示', '2'],
        '003002004': ['交通工程-中标结果公示', '3'],
        '003003001': ['水利工程-招标公告', '1'],
        '003003003': ['水利工程-中标候选人公示', '2'],
        '003003004': ['水利工程-中标结果公示', '3'],
        '003004001': ['政府采购-采购预告', '0'],
        '003004002': ['政府采购-采购公告', '1'],
        '003004003': ['政府采购-更正公告', '2'],
        '003004006': ['政府采购-成交公告', '3'],
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Pages on which a duplicate (or pre-2021) record was found; used as the
    # signal to stop paging.  Class-level, so it is shared across instances.
    msg = []

    def start_requests(self):
        """Page through the search API and yield one request per new record.

        Stops when either the last page is reached, or a page contains a
        record that is already in the redis dedup filter / dated before 2021
        (incremental-crawl cutoff).
        """
        import time  # hoisted out of the loop: was re-imported every page

        for page in range(1, 999999):
            # The payload must be this exact JSON string: 'pn' is the record
            # offset, and 'endTime' is today so new records are included.
            data = '{"token":"","pn":%s,"rn":20,"sdt":"","edt":"","wd":"","inc_wd":"","exc_wd":"","fields":"title","cnum":"001","sort":"{\\"infodatepx\\":\\"0\\"}","ssort":"title","cl":200,"terminal":"","time":[{"fieldName":"infodatepx","startTime":"2020-11-01 00:00:00","endTime":"%s 23:59:59"}],"statistics":null,"unionCondition":null,"accuracy":"","noParticiple":"1","searchRange":null,"isBusiness":"1"}' % ((page - 1) * 20, time.strftime('%Y-%m-%d'))
            # timeout added: without it a stalled server hangs the spider forever.
            res = requests.post(url=self.url, data=data, headers=self.headers, timeout=30).json()
            ls = res['result']['records']
            last_page = math.ceil(res['result']['totalcount'] / 20)  # 20 records per page
            if page > last_page:
                break
            for record in ls:
                item = {}
                item['link'] = 'http://jsggzy.jszwfw.gov.cn' + record['linkurl']
                # Category code is the third-from-last path segment of the link.
                key = item['link'].split('/')[-3]
                if key not in self.t_dic:
                    continue  # unknown category — skip
                item['title'] = record['title']
                item['time'] = record['infodateformat']
                item['classification'] = '江苏-' + self.t_dic[key][0]
                item['typ'] = self.t_dic[key][-1]
                # Already-seen (redis) or pre-2021 record => everything after
                # this point on subsequent pages is old; mark page and stop.
                if redis_dupefilter(item) or item['time'].startswith('2021'):
                    self.msg.append(page)
                    break
                yield scrapy.Request(url=item['link'], callback=self.parse, meta={'item': item})
            if page in self.msg:
                print('完成')
                break
            # Crude politeness throttle: one second per record fetched.
            time.sleep(len(ls))

    def parse(self, response):
        """Extract the announcement body from a detail page and yield the item."""
        item = BidItem()
        item.update(response.meta['item'])
        # Announcement content lives in the trade-detail container div.
        item['content'] = get_content(response.text, '//div[@class="ewb-trade-right l"]')
        item = get_field(dict(item))
        yield item