import json
import re
import time

import requests
import scrapy

from bid.items import BidItem
from bid.tools import *


class LiaoningSpider(scrapy.Spider):
    """Spider for the Liaoning public-resource trading platform (www.lnggzy.gov.cn).

    The listing page is an ASP.NET WebForms pager, so ``start_requests`` walks
    it with a plain ``requests`` session (POST-backs carrying __VIEWSTATE),
    filters the announcement categories of interest, and hands each detail
    page to Scrapy; ``parse`` extracts the announcement body.
    """

    name = 'liaoning'
    # Domain only: a trailing slash here would make OffsiteMiddleware treat
    # every request as off-site and drop it.
    allowed_domains = ['www.lnggzy.gov.cn']
    start_urls = ['http://www.lnggzy.gov.cn/']
    # Listing URL; the end date is "today", evaluated once at import time.
    url = f'http://www.lnggzy.gov.cn/lnggzy/showinfo/Morejyxx.aspx?timebegin=2016-11-05&timeend={time.strftime("%Y-%m-%d")}&timetype=06&num1=000&num2=000000&jyly=005&word='
    # CategoryNum -> [category label, content <div> id on the detail page, type code]
    t_dic = {
        '005003001001': ['政府采购-采购/资质预审公告', 'tv001', '1'],
        '005003001002': ['政府采购-变更公告', 'tv002', '2'],
        '005002001003': ['政府采购-采购合同公示', 'tv003', '3'],
        '005003001004': ['政府采购-中标结果公示', 'tv004', '3'],
        '005002002001': ['建设工程-采购/资质预审公告', 'tv001', '1'],
        '005014002003': ['建设工程-中标候选人公示', 'tv003', '2'],
        '005015002004': ['建设工程-中标结果公示', 'tv004', '3'],
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Pages on which the stop condition (duplicate / 2021-dated item) was hit.
    msg = []

    def start_requests(self):
        """Walk the WebForms pager and yield one Request per wanted announcement.

        Stops after the last page, or as soon as a page yields a known
        duplicate or an item dated 2021 (older data presumably already
        collected -- the original code encoded the same cutoff).
        """
        session = requests.session()
        response = session.get(self.url)
        # Hidden WebForms state fields; both must be echoed in every POST-back.
        viewstate = re.findall(r'__VIEWSTATE" value="(.*?)"', response.text, re.S)[0]
        viewstate_generator = re.findall(r'__VIEWSTATEGENERATOR" value="(.*?)"', response.text, re.S)[0]
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36',
            'Referer': self.url,
        }
        # Hoisted: these patterns run once per page / once per row.
        row_pattern = re.compile(
            r'<h4> <a class="ewb-list-name" href="(.*?)" target="_blank" title="(.*?)".*?'
            r'<span class="span_o">\s*(.*?)\s*</span></h4>',
            re.S)
        last_page_pattern = re.compile(r'页码<b><span id="MoreInfoListjyxx1_ys">\d+/(\d+)<', re.S)
        for page in range(1, 999999):
            data = {
                '__EVENTTARGET': 'MoreInfoListjyxx1$Pager',
                # Fixed: was '_EVENTARGUMENT' (single underscore); the ASP.NET
                # postback field name uses a double underscore.
                '__EVENTARGUMENT': '999999',
                '__VIEWSTATE': viewstate,
                '__VIEWSTATEGENERATOR': viewstate_generator,
                '__VIEWSTATEENCRYPTED': '',
                'MoreInfoListjyxx1$Pager_input': str(page),
            }
            html = session.post(self.url, data=data, headers=headers, verify=False).text
            rows = row_pattern.findall(html)
            last_page = last_page_pattern.findall(html)[0]
            if page > int(last_page):
                break
            for href, title, pub_date in rows:
                link = 'http://www.lnggzy.gov.cn' + href
                key = link.split('=')[-1]
                if key not in self.t_dic:
                    continue
                label, div_id, typ = self.t_dic[key]
                infoid = re.findall(r'InfoID=(.*?)&', link)[0]
                category_num = re.findall(r'CategoryNum=(\d+)', link)[0]
                item = {
                    'title': title,
                    'time': pub_date,
                    # Fixed: parse() indexes classification[0] (label) and
                    # classification[1] (content div id), but the original
                    # stored a plain string here, so parse() ended up indexing
                    # single characters ('辽'/'宁').  NOTE(review):
                    # redis_dupefilter now sees the pair instead of the bare
                    # label -- confirm the dedup key is unaffected.
                    'classification': ('辽宁-' + label, div_id),
                    'typ': typ,
                }
                # Construction projects and government procurement use
                # different detail-page endpoints.
                detail = 'Jsgc' if '建设工程' in label else 'Zfcg'
                item['link'] = f'http://www.lnggzy.gov.cn/lnggzy/ZtbInfo/{detail}.aspx?InfoID={infoid}&CategoryNum={category_num}'
                if redis_dupefilter(item) or item['time'].startswith('2021'):
                    self.msg.append(page)
                    break
                yield scrapy.Request(url=item['link'], callback=self.parse, meta={'item': item})
            if page in self.msg:
                print('完成')
                break
            time.sleep(len(rows))  # throttle proportionally to the rows just fetched

    def parse(self, response):
        """Build a BidItem from a detail page scheduled by start_requests.

        ``meta['item']['classification']`` carries a (label, content-div-id)
        pair: the div id locates the announcement body in the page, and only
        the human-readable label is kept on the yielded item.
        """
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(response.text, '//div[@id="%s"]' % item['classification'][1])
        item = get_field(dict(item))
        item['classification'] = item['classification'][0]
        yield item