import re
import time

import requests
import scrapy

from bid.items import BidItem
from bid.tools import *


class JiangxiSpider(scrapy.Spider):
    """Crawl tender/bid announcements from ggzy.jiangxi.gov.cn.

    Each entry in ``t_lis`` describes one channel:
    ``[list-page URL template (%s = page number), category label, type code]``.
    List pages are walked sequentially; a channel stops as soon as a duplicate
    item (redis dedup) or an item from the cutoff year is seen, or its last
    page is passed.
    """

    name = 'jiangxi'
    allowed_domains = ['ggzy.jiangxi.gov.cn']
    start_urls = ['https://ggzy.jiangxi.gov.cn/']
    # [URL template, category label, type code]
    t_lis = [
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002001/002001001/%s.html', '房间及市政工程-招标公告','1'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002001/002001002/%s.html', '房间及市政工程-答疑澄清','2'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002001/002001004/%s.html', '房间及市政工程-中标公示','3'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002002/002002006/%s.html', '交通工程-招标计划','0'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002002/002002002/%s.html', '交通工程-招标公告','1'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002002/002002003/%s.html', '交通工程-补遗书','2'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002002/002002005/%s.html', '交通工程-中标公示','3'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002003/002003001/%s.html', '水利工程-资格预审公告/招标公告','1'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002003/002003002/%s.html', '水利工程-补遗澄清','2'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002003/002003004/%s.html', '水利工程-中标候选人公示','2'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002003/002003005/%s.html', '水利工程-中标结果公示','3'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002005/002005001/%s.html', '重点/外贷-招标公告','1'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002005/002005002/%s.html', '重点/外贷-答疑澄清','2'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002005/002005004/%s.html', '重点/外贷-结果公示','3'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002006/002006007/%s.html', '政府采购-采购意向','0'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002006/002006001/%s.html', '政府采购-采购公告','1'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002006/002006002/%s.html', '政府采购-变更公告','2'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002006/002006003/%s.html', '政府采购-答疑澄清','2'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002006/002006004/%s.html', '政府采购-结果公示','3'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002006/002006005/%s.html', '政府采购-单一来源公示','1'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002006/002006006/%s.html', '政府采购-合同公示','3'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002013/002013001/%s.html', '其他项目-交易公告','1'],
        ['https://ggzy.jiangxi.gov.cn/web/jyxx/002013/002013002/%s.html', '其他项目-成交公示','3'],
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Channels that hit a duplicate / cutoff item and are considered finished.
    msg = []
    # Stop a channel once publication dates fall into this year (incremental
    # crawl boundary); previously a hard-coded '2021' literal.
    cutoff_year = '2021'
    # Patterns hoisted out of the page loop and written as raw strings
    # (non-raw '\s' / '\d' emit invalid-escape warnings on modern Pythons).
    _item_re = re.compile(
        r'<li class="ewb-list-node clearfix">\s*<a href="(.*?)".*?>\s*(.*?)\s*</a>.*?"ewb-list-date">(.*?)<',
        re.S)
    _last_page_re = re.compile(r'/(\d+).html" >末页<', re.S)

    def start_requests(self):
        """Yield detail-page requests for every new item in every channel.

        List pages are fetched synchronously with ``requests``.
        NOTE(review): blocking HTTP inside a Scrapy spider serializes the
        crawl; kept for behavioral compatibility, but a ``timeout`` is added
        so a stalled server cannot hang the process forever.
        """
        for lis in self.t_lis:
            url_tpl, channel, typ = lis[0], lis[1], lis[-1]
            for page in range(1, 9999):
                res = requests.get(url_tpl % page, headers=self.headers,
                                   timeout=30)
                res.encoding = 'utf-8'
                ls = self._item_re.findall(res.text)
                last_pages = self._last_page_re.findall(res.text)
                # Missing pagination marker (error/anti-scrape page) used to
                # raise IndexError and kill the spider; treat it as the end
                # of this channel instead.
                if not last_pages or page > int(last_pages[0]):
                    break
                for link, raw_title, pub_time in ls:
                    item = {
                        'link': 'https://ggzy.jiangxi.gov.cn' + link,
                        # Strip any markup (e.g. highlight tags) from titles.
                        'title': re.sub('<.*?>', '', raw_title),
                        'time': pub_time,
                        'classification': '江西-' + channel,
                        'typ': typ,
                    }
                    # Already-seen item or one older than the crawl boundary:
                    # everything after it on this channel is old too.
                    if redis_dupefilter(item) or item['time'].startswith(self.cutoff_year):
                        self.msg.append(lis)
                        break
                    yield scrapy.Request(url=item['link'], callback=self.parse,
                                         meta={'item': item})
                if lis in self.msg:
                    print(channel, '完成')
                    break
                # Crude politeness delay proportional to page size.
                time.sleep(len(ls))

    def parse(self, response):
        """Fill the item with the announcement body and emit it.

        The list-page metadata travels in ``response.meta['item']``; only the
        page content is extracted here.
        """
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(response.text, '//div[@class="con"]')
        # get_field post-processes/derives the remaining fields.
        yield get_field(dict(item))
