import json
import re
import time

import requests
import scrapy

from bid.items import BidItem
from bid.tools import *


class AnhuiSpider(scrapy.Spider):
    """Spider for the Anhui public-resource trading site (ggzy.ah.gov.cn).

    For each category in ``t_lis`` it paginates the site's POST list
    endpoint synchronously (via ``requests``), extracts bulletin rows
    with regexes, and yields a Scrapy ``FormRequest`` to the matching
    ``newDetailSub`` detail endpoint; :meth:`parse` turns each detail
    response into a ``BidItem``.
    """

    name = 'anhui'
    allowed_domains = ['ggzy.ah.gov.cn']
    start_urls = ['https://ggzy.ah.gov.cn/']
    # Each entry: [POST-payload template (currentPage filled in with %),
    #              human-readable classification, detail "type" param, typ code]
    t_lis = [
        ['{"currentPage":"%s","tenderProjectType":"A01","bulletinNature":"1"}', '建筑工程-招标公告','tender','1'],
        ['{"currentPage":"%s","tenderProjectType":"A01","bulletinNature":"2"}', '建筑工程-中标候选人公示','pbjg','2'],
        ['{"currentPage":"%s","tenderProjectType":"A01","bulletinNature":"3"}', '建筑工程-中标公示','zbjg','3'],
        ['{"currentPage":"%s","tenderProjectType":"A07","bulletinNature":"1"}', '水利工程-招标公告','tender','1'],
        ['{"currentPage":"%s","tenderProjectType":"A07","bulletinNature":"2"}', '水利工程-中标候选人公示','pbjg','2'],
        ['{"currentPage":"%s","tenderProjectType":"A07","bulletinNature":"3"}', '水利工程-中标公示','zbjg','3'],
        ['{"currentPage":"%s","tenderProjectType":"AAA","bulletinNature":"1"}', '交通工程-招标公告','tender','1'],
        ['{"currentPage":"%s","tenderProjectType":"AAA","bulletinNature":"2"}', '交通工程-中标候选人公示','pbjg','2'],
        ['{"currentPage":"%s","tenderProjectType":"AAA","bulletinNature":"3"}', '交通工程-中标公示','zbjg','3'],
        ['{"currentPage":"%s","tenderProjectType":"A99","bulletinNature":"1"}', '其他工程-招标公告','tender','1'],
        ['{"currentPage":"%s","tenderProjectType":"A99","bulletinNature":"2"}', '其他工程-中标候选人公示','pbjg','2'],
        ['{"currentPage":"%s","tenderProjectType":"A99","bulletinNature":"3"}', '其他工程-中标公示','zbjg','3'],
        ['{"currentPage":"%s","bulletinNature":"1"}', '政府采购-采购公告','bulletin','1'],
        ['{"currentPage":"%s","bulletinNature":"3"}', '政府采购-交易结果','zbjg','3'],
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Categories that hit a stop condition (duplicate item or a 2021-dated
    # item); used as a sentinel to end that category's pagination.
    msg = []

    def start_requests(self):
        """Paginate every category list and yield detail-page requests.

        Yields:
            scrapy.FormRequest: one POST to the ``newDetailSub`` endpoint
            per new bulletin, with the partially-filled item in ``meta``.
        """
        for lis in self.t_lis:
            # The list endpoint differs for government procurement; this
            # choice is invariant per category, so hoist it out of the
            # pagination loop.
            if '政府采购' in lis[1]:
                url = 'http://ggzy.ah.gov.cn/zfcg/list'
            else:
                url = 'http://ggzy.ah.gov.cn/jsgc/list'
            for page in range(1, 9999):
                # Blocking fetch of one list page. NOTE(review): this runs
                # synchronously inside Scrapy and bypasses its scheduler.
                res = requests.post(url, headers=self.headers,
                                    data=json.loads(lis[0] % page))
                res.encoding = 'utf-8'
                # One row per bulletin: (relative link, title, date string).
                ls = re.findall(
                    r' <li class="list-item">.*?href="(.*?)".*?title="(.*?)".*?class="date float-r m-r-40">(.*?)<',
                    res.text, re.S)
                # Pager text gives the total page count. Guard the lookup:
                # the original indexed [0] and crashed with IndexError when
                # the pager was missing (error page / markup change).
                last_page = re.findall(r'ss="dian">共(\d+)页', res.text, re.S)
                if not last_page or page > int(last_page[0]):
                    break
                for row in ls:
                    item = {
                        'link': 'http://ggzy.ah.gov.cn' + row[0],
                        'title': row[1],
                        'time': row[2],
                        'classification': '安徽-' + lis[1],
                        'typ': lis[-1],
                    }
                    # Stop this category once we reach an already-seen item
                    # or anything dated 2021 (incremental-crawl cutoff).
                    if redis_dupefilter(item) or item['time'].startswith('2021'):
                        self.msg.append(lis)
                        break
                    yield scrapy.FormRequest(
                        url=url.replace('list', 'newDetailSub'),
                        formdata={
                            'type': lis[2],
                            'bulletinNature': item['link'].split('=')[-1],
                            'guid': re.findall('guid=(.*?)&', item['link'])[0],
                        },
                        callback=self.parse,
                        meta={'item': item})
                if lis in self.msg:
                    # Stop condition reached inside the row loop above.
                    print(lis[1], '完成')
                    break
                # Crude politeness throttle proportional to rows fetched.
                time.sleep(len(ls))

    def parse(self, response):
        """Build and yield a ``BidItem`` from one detail-page response.

        The list-page fields arrive via ``response.meta['item']``; the
        article body is extracted from the detail HTML with the project
        helper ``get_content`` and post-processed by ``get_field``.
        """
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(
            response.text, '//div[@class="article-text-box m-b-50 m-t-50"]')
        item = get_field(dict(item))
        yield item
