import json
import time

import requests
import scrapy

from bid.items import BidItem
from bid.tools import *


class SichuanSpider(scrapy.Spider):
    """Crawl tender / procurement announcements from the Sichuan public
    resource trading platform (ggzyjy.sc.gov.cn).

    The listing data comes from the site's full-text-search JSON API,
    queried in 2-hour windows walking backwards from the time the spider
    class was loaded.  Detail pages are then fetched through Scrapy and
    parsed in :meth:`parse`.
    """

    name = 'sichuan'
    # BUG FIX: the original value had a trailing slash ('ggzyjy.sc.gov.cn/'),
    # which can never match a hostname, silently disabling the offsite filter.
    allowed_domains = ['ggzyjy.sc.gov.cn']
    start_urls = ['http://ggzyjy.sc.gov.cn/']
    url = 'http://ggzyjy.sc.gov.cn/inteligentsearch/rest/inteligentSearch/getFullTextData'
    # categorynum (3rd-from-last path segment of the link URL)
    #   -> [human-readable classification, type code]
    t_dic = {
        '002001001': ['工程建设-招标公告', '1'],
        '002001002': ['工程建设-资格预审补遗/澄清', '2'],
        '002001003': ['工程建设-招标文件补遗/澄清', '2'],
        '002001004': ['工程建设-流标或终止公告', '2'],
        '002001005': ['工程建设-开标记录', '2'],
        '002001006': ['工程建设-评标结果公示', '2'],
        '002002001': ['政府采购-采购公告', '1'],
        '002002002': ['政府采购-更正公告', '2'],
        '002002003': ['政府采购-中标公告', '3'],
        '002002004': ['政府采购-签约履行', '2'],
        '002002005': ['政府采购-终止公告', '2'],
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    msg = []           # window indices where a duplicate/old item was seen; stop signal
    t = time.time()    # crawl anchor, fixed once at class-definition time

    def _payload(self, rn, start, end):
        """Build the JSON body for the search API.

        BUG FIX: the original hand-assembled string contained a stray
        double comma (``}],,"time"``), producing invalid JSON.  Building
        the body with ``json.dumps`` guarantees well-formed output.

        :param rn: page size (number of records to return).
        :param start: window start, 'YYYY-mm-dd HH:MM:SS'.
        :param end: window end, same format.
        """
        return json.dumps({
            "pn": 0,
            "rn": rn,
            "sort": "{'webdate':'0'}",
            "ssort": "title",
            "cl": 500,
            "condition": [{
                "fieldName": "categorynum",
                "equal": "002",
                "notEqual": None,
                "equalList": None,
                "notEqualList": None,
                "isLike": True,
                "likeType": 2,
            }],
            "time": [{
                "fieldName": "webdate",
                "startTime": start,
                "endTime": end,
            }],
            "isBusiness": "1",
        }, ensure_ascii=False)

    def start_requests(self):
        # Walk backwards through up to one year of 2-hour windows
        # (365 days * 12 windows/day).  NOTE(review): the probe requests
        # below use blocking `requests` calls inside a Scrapy spider,
        # which stalls the reactor — kept as-is to preserve behavior.
        for i in range(365 * 12):
            start = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.t - 7200 - i * 7200))
            end = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.t - i * 7200))
            # Probe: how many records fall inside this window?
            # verify=False is moot for a plain-http URL; left for parity.
            response = requests.post(self.url, headers=self.headers,
                                     data=self._payload(12, start, end), verify=False)
            count = response.json()['result']['totalcount']
            if not count:
                continue
            time.sleep(1)
            # Fetch the entire window in a single page (rn = count).
            res = requests.post(url=self.url, headers=self.headers,
                                data=self._payload(count, start, end)).json()
            ls = res['result']['records']
            for l in ls:
                item = {}
                item['link'] = 'http://ggzyjy.sc.gov.cn' + l['linkurl']
                key = item['link'].split('/')[-3]
                if key not in self.t_dic:
                    continue
                item['title'] = l['title']
                item['time'] = l['webdate'][:10]  # keep date part only
                item['classification'] = '四川-' + self.t_dic[key][0]
                item['typ'] = self.t_dic[key][-1]
                # Stop condition: already-seen item, or anything from 2021
                # (hard-coded history cutoff — TODO make configurable).
                if redis_dupefilter(item) or item['time'].startswith('2021'):
                    self.msg.append(i)
                    break
                yield scrapy.Request(url=item['link'], callback=self.parse, meta={'item': item})
                time.sleep(1)
                # BUG FIX: an unconditional `break` here discarded every
                # record after the first, defeating the rn=count fetch above.
            if i in self.msg:
                print('完成')
                break
            time.sleep(len(ls))

    def parse(self, response):
        """Extract the detail page body, merge it into the listing item
        carried via meta, and emit the finished item."""
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(response.text, '//div[@class="container news-detailed"]//div[@class="clearfix"]')
        item = get_field(dict(item))
        yield item