import re
import time

import requests
import scrapy

from bid.items import BidItem
from bid.tools import *


class JilinSpider(scrapy.Spider):
    """Crawl bidding/procurement announcements from the Jilin province
    public resource trading center (www.ggzyzx.jl.gov.cn).

    Each entry of ``t_lis`` is ``[url_template, category_label, type_code]``.
    ``url_template`` contains a ``%s`` placeholder: page 1 is
    ``index.html`` (placeholder empty) and later pages are
    ``index_<page>.html``.
    """
    name = 'jilin'
    allowed_domains = ['www.ggzyzx.jl.gov.cn']
    start_urls = ['https://www.ggzyzx.jl.gov.cn/']
    t_lis = [
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcjz/zbgg/index%s.html', '政府采购（集中）-招标公告','1'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcjz/zqyjgg/index%s.html', '政府采购（集中）-征求意见公告','2'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcjz/bggg/index%s.html', '政府采购（集中）-变更（延期）公告','2'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcjz/zbjggg/index%s.html', '政府采购（集中）-中标（成交）公告','3'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcjz/htgg/index%s.html', '政府采购（集中）-合同公告','3'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcjz/fbgg/index%s.html', '政府采购（集中）-废标（终止）公告','2'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcfjz/cgzxzbgg/index%s.html', '政府采购（非集中）-招标公告','1'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcfjz/cgzxbggg/index%s.html', '政府采购（非集中）-变更（延期）公告','2'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcfjz/cgzxzbjggg/index%s.html', '政府采购（非集中）-中标（成交）公告','3'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcfjz/cgzxhtgg/index%s.html', '政府采购（非集中）-合同公告','3'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/zcfjz/cgzxfbgg/index%s.html', '政府采购（非集中）-废标（终止）公告','2'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/cgzxgcjs/cgzxzbgg/index%s.html', '工程建设-招标公告','1'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/cgzxgcjs/cgzxbggg/index%s.html', '工程建设-变更公告','2'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/cgzxgcjs/zbhxrgs/index%s.html', '工程建设-中标候选人公示','2'],
        ['http://www.ggzyzx.jl.gov.cn/jygg/cgzxgcjs/zbgg/index%s.html', '工程建设-中标公告','3'],
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Categories whose pagination loop has been exhausted (duplicate or 2021
    # content reached). NOTE: class-level, so shared across instances — fine
    # for a spider instantiated once per crawl.
    msg = []
    # Pre-compiled (raw string — the original non-raw '\s'/'\.' are invalid
    # escape sequences). Captures per list row:
    #   (relative href after the leading '.', title HTML, publish date).
    # re.S lets '.*?' span newlines in the markup.
    _list_pat = re.compile(
        r'<li>\s*<a href="\.(.*?)" target="_blank">(.*?)</a>.*?class="time_3">\s*(.*?)\s*<',
        re.S)

    def start_requests(self):
        """Walk each category's list pages in order, yielding a detail-page
        request per unseen item; stop a category at the first duplicate or
        first 2021-dated item (history cut-off)."""
        for lis in self.t_lis:
            for page in range(1, 9999):
                # Page 1 is plain 'index.html'; later pages 'index_<n>.html'.
                suffix = '' if page == 1 else f'_{page}'
                # timeout prevents one hung connection stalling the crawl
                res = requests.get(lis[0] % suffix, headers=self.headers,
                                   timeout=30)
                res.encoding = 'utf-8'
                rows = self._list_pat.findall(res.text)
                for href, raw_title, pub_date in rows:
                    item = {
                        # Swap the '/index%s.html' tail for the item's path.
                        'link': lis[0].replace('/index%s.html', href),
                        # Strip any inline HTML tags from the title text.
                        'title': re.sub('<.*?>', '', raw_title),
                        'time': pub_date,
                        'classification': '吉林-' + lis[1],
                        'typ': lis[-1],
                    }
                    if redis_dupefilter(item) or item['time'].startswith('2021'):
                        # Mark this category as done and stop paginating it.
                        self.msg.append(lis)
                        break
                    yield scrapy.Request(url=item['link'], callback=self.parse,
                                         meta={'item': item})
                if lis in self.msg:
                    print(lis[1], '完成')
                    break
                # Crude politeness delay proportional to rows just fetched.
                time.sleep(len(rows))

    def parse(self, response):
        """Fill the detail-page content into the item and emit it."""
        item = BidItem()
        item.update(response.meta['item'])
        # Announcement body lives under div class="mains m_t_60".
        item['content'] = get_content(response.text, '//div[@class="mains m_t_60"]')
        item = get_field(dict(item))
        yield item
