import scrapy, requests
from bid.tools import *
from bid.items import BidItem


class TianjinSpider(scrapy.Spider):
    """Crawl procurement / construction-bid announcements from the Tianjin
    public resource trading site (list pages served by 60.28.163.169).

    List pages are fetched synchronously with ``requests`` inside
    ``start_requests``; only the article detail pages are scheduled
    through Scrapy and handled in :meth:`parse`.
    """
    name = 'tianjin'
    # Scrapy expects bare domain names here, not URLs — with the scheme and
    # trailing slash included the offsite middleware could never match, so
    # every request would be treated as off-site.
    allowed_domains = ['60.28.163.169', 'ggzy.zwfwb.tj.gov.cn']
    # Each entry: [list-page URL template (%s = page number),
    #              category label, type code stored in item['typ']].
    t_lis = [
        [
            'http://60.28.163.169/queryContent_%s-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=86&beginTime=&endTime=',
            '政府采购-需求公示', '0'],
        [
            'http://60.28.163.169/queryContent_%s-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=87&beginTime=&endTime=',
            '政府采购-采购公告', '1'],
        [
            'http://60.28.163.169/queryContent_%s-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=90&beginTime=&endTime=',
            '政府采购-更正公告', '2'],
        [
            'http://60.28.163.169/queryContent_%s-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=88&beginTime=&endTime=',
            '政府采购-采购结果公告', '3'],
        [
            'http://60.28.163.169/queryContent_%s-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=176&beginTime=&endTime=',
            '政府采购-合同及验收公告', '2'],
        [
            'http://60.28.163.169/queryContent_%s-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=81&beginTime=&endTime=',
            '工程建设-招标公告', '1'],
        [
            'http://60.28.163.169/queryContent_%s-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=83&beginTime=&endTime=',
            '工程建设-中标结果', '3'],
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Categories whose incremental crawl has already hit a duplicate or a
    # too-old item; paging for such a category is stopped.
    msg = []

    def start_requests(self):
        """Walk each category's list pages, yielding one detail-page request
        per announcement, until a duplicate or a 2021-dated item is found.

        Raw patterns (``r'...'``) are used for the regexes: ``\\d`` / ``\\s``
        in plain strings are invalid escape sequences (SyntaxWarning on
        Python 3.12+) with an identical runtime value.
        """
        for lis in self.t_lis:
            for page in range(1, 9999):
                res = requests.get(lis[0] % page, headers=self.headers).text
                # (detail-url, title-html, publish-date) triples from the list markup.
                ls = re.findall(
                    r'lass="article-list3-t".*?url="(.*?)" target="_blank">\s*(.*?)\s*</a>.*?list-times">\s*(.*?)\s*<',
                    res, re.S)
                # The last <option> of the pager holds the total page count.
                # NOTE(review): assumes the pager is always present — an
                # empty findall would raise IndexError here.
                last_page = re.findall(r'<option value="\d+" >(\d+)</option>', res, re.S)[-1]
                if page > int(last_page):
                    break
                for l in ls:
                    item = {}
                    # The site expects the numeric article id AES-ECB
                    # encrypted (then URL-massaged) in the detail URL.
                    key = re.findall(r'/(\d+).jhtml', l[0])[0]
                    key_ = aes_encrypt(key, AES.MODE_ECB, 'qnbyzzwmdgghmcnm', 'pkcs7')
                    key_ = key_.replace('=', '').replace('/', '%5E')
                    item['link'] = l[0].replace(key, key_).replace(':80', '')
                    item['time'] = l[2]
                    item['title'] = re.sub('<.*?>', '', l[1])
                    item['classification'] = '天津-' + lis[1]
                    item['typ'] = lis[-1]
                    # Incremental-crawl cutoff: stop this category at the
                    # first already-seen or pre-2022 announcement.
                    if redis_dupefilter(item) or item['time'].startswith('2021'):
                        self.msg.append(lis)
                        break
                    yield scrapy.Request(url=item['link'], callback=self.parse, meta={'item': item})
                if lis in self.msg:
                    print(lis[1], '完成')
                    break
                # Crude politeness delay proportional to the rows just parsed.
                time.sleep(len(ls))

    def parse(self, response):
        """Extract the announcement body and emit a populated BidItem."""
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(response.text, '//div[@class="content-article"]/div[@id="content"]')
        # get_field (from bid.tools) presumably normalizes/derives the final
        # fields from the raw dict — TODO confirm against bid.tools.
        item = get_field(dict(item))
        yield item
