import scrapy
import requests,json
from bid.items import BidItem
from bid.tools import *


class HubeiSpider(scrapy.Spider):
    """Spider for the Hubei public-bidding portal (www.hbbidcloud.cn).

    ``start_requests`` walks the paginated announcement list synchronously
    (via ``requests``) and yields one Scrapy ``Request`` per fresh detail
    page; ``parse`` extracts the article body into a ``BidItem``.

    Crawl stops when either the site-reported last page is passed, or a
    duplicate / too-old item (see ``cutoff_year``) is encountered.
    """

    name = 'hubei'
    # Scrapy's allowed_domains must be bare domain names; the previous
    # value carried a trailing slash, which OffsiteMiddleware warns about
    # and does not match as a domain.
    allowed_domains = ['www.hbbidcloud.cn']
    start_urls = ['http://www.hbbidcloud.cn/']
    # List-page URL template; %s is the 1-based page index.
    url = 'http://www.hbbidcloud.cn/hubei/jyxx/about.html?categoryNum=004&pageIndex=%s'
    # Items whose date starts with this year terminate the crawl
    # (previously a hard-coded literal inside the loop).
    cutoff_year = '2021'
    # categoryNum (3rd-from-last URL path segment) ->
    #   [human-readable classification, type code used by downstream pipeline]
    t_dic = {
        '004001001': ['房建市政-项目注册', '0'],
        '004001002': ['水利工程-项目注册', '0'],
        '004001003': ['交通工程-项目注册', '0'],
        '004001004': ['铁路工程-项目注册', '0'],
        '004001006': ['其他工程-项目注册', '0'],
        '004001007': ['政府采购-项目注册', '0'],
        '004002001': ['房建市政-招标公告', '1'],
        '004002002': ['水利工程-招标公告', '1'],
        '004002003': ['交通工程-招标公告', '1'],
        '004002004': ['铁路工程-招标公告', '1'],
        '004002006': ['其他工程-招标公告', '1'],
        '004002007': ['政府采购-招标公告', '1'],
        '004003001': ['房建市政-澄清/招标异常', '2'],
        '004003002': ['水利工程-澄清/招标异常', '2'],
        '004003003': ['交通工程-澄清/招标异常', '2'],
        '004003004': ['铁路工程-澄清/招标异常', '2'],
        '004003006': ['其他工程-澄清/招标异常', '2'],
        '004003007': ['政府采购-澄清/招标异常', '2'],
        '004004001': ['房建市政-评标结果公示', '2'],
        '004004002': ['水利工程-评标结果公示', '2'],
        '004004003': ['交通工程-评标结果公示', '2'],
        '004004004': ['铁路工程-评标结果公示', '2'],
        '004004006': ['其他工程-评标结果公示', '2'],
        '004004007': ['政府采购-评标结果公示', '2'],
        '004005001': ['房建市政-中标结果', '3'],
        '004005002': ['水利工程-中标结果', '3'],
        '004005003': ['交通工程-中标结果', '3'],
        '004005004': ['铁路工程-中标结果', '3'],
        '004005006': ['其他工程-中标结果', '3'],
        '004005007': ['政府采购-中标结果', '3'],
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    # Pages on which a duplicate/old item was seen; a page appearing here
    # ends pagination after its inner loop finishes.
    msg = []

    def start_requests(self):
        """Iterate list pages, yielding a Request for each fresh announcement.

        Stops when the page index exceeds the site's reported last page,
        or once a duplicate / pre-``cutoff_year`` item is found.
        """
        for page in range(1, 999999):
            # Synchronous fetch of the list page; the explicit timeout
            # prevents the generator from hanging forever on a stalled
            # connection (the original call had none).
            res = requests.get(url=self.url % page, headers=self.headers,
                               timeout=30).text
            # Each announcement row: (relative link, title, date).
            ls = re.findall(
                r'<li class="wb-data-list">.*?href="(.*?)" title="(.*?)".*?"wb-data-date">(.*?)<',
                res, re.S)
            # The page embeds "total: N" for its JS pager; 19 rows per page.
            # Raw strings fix the invalid \s / \d escape warnings.
            last_page = math.ceil(
                int(re.findall(r'\stotal: (\d+),', res, re.S)[-1]) / 19)
            if page > last_page:
                break
            for l in ls:
                item = {}
                item['link'] = 'http://www.hbbidcloud.cn' + l[0]
                # Third-from-last path segment is the categoryNum key.
                key = item['link'].split('/')[-3]
                if key not in self.t_dic:
                    continue
                item['title'] = l[1]
                item['time'] = l[2]
                item['classification'] = '湖北-' + self.t_dic[key][0]
                item['typ'] = self.t_dic[key][-1]
                # First already-seen or too-old item ends the crawl:
                # remember this page and bail out of the row loop.
                if redis_dupefilter(item) or item['time'].startswith(self.cutoff_year):
                    self.msg.append(page)
                    break
                yield scrapy.Request(url=item['link'], callback=self.parse,
                                     meta={'item': item})
            if page in self.msg:
                print('完成')
                break
            # Polite throttle, proportional to the number of rows fetched.
            time.sleep(len(ls))

    def parse(self, response):
        """Extract the detail-page article body and emit the finished item."""
        item = BidItem()
        item.update(response.meta['item'])
        item['content'] = get_content(response.text, '//div[@class="ewb-article-info"]')
        item = get_field(dict(item))
        yield item