import scrapy
import requests
from bid.items import BidItem
from bid.tools import *


class NeimengguSpider(scrapy.Spider):
    """Spider for the Inner Mongolia public-resource trading site (ggzyjy.nmg.gov.cn).

    List pages are paginated via a POSTed ``currentPage`` form field, so they
    are fetched synchronously with ``requests``; only the detail pages are
    scheduled through Scrapy and parsed in :meth:`parse`.
    """
    name = 'neimenggu'
    allowed_domains = ['ggzyjy.nmg.gov.cn']
    start_urls = ['https://ggzyjy.nmg.gov.cn/']
    # Each entry: [list-page URL, category label, type code -> item['typ']].
    t_lis = [
        ['http://ggzyjy.nmg.gov.cn/jyxx/jsgcZbgg', '工程建设-招标公告与资格预审公告','1'],
        ['http://ggzyjy.nmg.gov.cn/jyxx/jsgcGzsx', '工程建设-变更/补遗公告','2'],
        ['http://ggzyjy.nmg.gov.cn/jyxx/jsgcKbjl', '工程建设-开标记录','2'],
        ['http://ggzyjy.nmg.gov.cn/jyxx/jsgcZbhxrgs', '工程建设-中标候选人公示','2'],
        ['http://ggzyjy.nmg.gov.cn/jyxx/jsgcZbjggs', '工程建设-交易结果公示','3'],
        ['http://ggzyjy.nmg.gov.cn/jyxx/zfcg/cggg', '政府采购-采购/资格预审公告','1'],
        ['http://ggzyjy.nmg.gov.cn/jyxx/zfcg/gzsx', '政府采购-更正公告','2'],
        ['http://ggzyjy.nmg.gov.cn/jyxx/qtjy/jygg', '其他交易-交易公告','1'],
        ['http://ggzyjy.nmg.gov.cn/jyxx/qtjy/jyqr', '其他交易-交易结果','3'],
        ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Categories that reached an already-seen or 2021-dated item (i.e. done).
    msg = []

    def start_requests(self):
        """Walk every category's list pages and yield detail-page requests.

        Paging of a category stops as soon as an item is a duplicate (per
        ``redis_dupefilter``) or is dated 2021; crawling then proceeds with
        the NEXT category.
        """
        for lis in self.t_lis:
            for page in range(1, 9999):
                res = requests.post(lis[0], data={'currentPage': str(page)}, headers=self.headers)
                tree = etree.HTML(res.text)
                # Skip the header row of the results table.
                ls = tree.xpath('/html/body/div[2]/div[2]/div/div[4]/table//tr')[1:]
                # Total page count is embedded in the pager markup; if the
                # marker is missing (layout change / error page), stop paging
                # instead of crashing on an empty findall() result.
                pages_found = re.findall(r'lass="dian">共(\d+)页', res.text, re.S)
                if not pages_found or page > int(pages_found[0]):
                    break
                for row in ls:
                    item = {}
                    item['link'] = 'http://ggzyjy.nmg.gov.cn' + row.xpath('.//a/@href')[0]
                    item['title'] = row.xpath('.//a/@title')[0].strip()
                    # The publication date lives in td[4] on most list
                    # layouts and in td[3] on the narrower ones.
                    try:
                        item['time'] = row.xpath('./td[4]/text()')[0].strip()
                    except IndexError:
                        item['time'] = row.xpath('./td[3]/text()')[0].strip()
                    item['classification'] = '内蒙古-'+lis[1]
                    item['typ'] = lis[-1]
                    if redis_dupefilter(item) or item['time'].startswith('2021'):
                        # Already-crawled territory: mark category finished.
                        self.msg.append(lis)
                        break
                    yield scrapy.Request(url=item['link'], callback=self.parse, meta={'item': item})
                if lis in self.msg:
                    print(lis[1],'完成')
                    break
            if lis in self.msg:
                print(lis[1], '完成')
                # BUG FIX: this was `break`, which aborted the whole category
                # loop once ONE category finished, silently skipping every
                # remaining entry of t_lis. `continue` moves on instead.
                continue
            # Polite per-category throttle, proportional to rows fetched.
            time.sleep(len(ls))

    def parse(self, response):
        """Build a BidItem from a detail page's metadata plus its content."""
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(response.text, '//div[@class="detail_contect"]')
        # get_field post-processes the raw dict into the final field set.
        item = get_field(dict(item))
        yield item
