import scrapy
from spidertools.utils.time_utils import get_current_date
import re
import requests
from commonresources.spider_items.beijing.items import BeiJingShiGongGongZiYuanJiaoYiFuWuPingTaiItem
from commonresources.spiders.basespider import BaseSpider
from scrapy.selector import Selector
import requests.packages.urllib3
class BeiJingShiGongGongZiYuanJiaoYiFuWuPingTaiSpider(BaseSpider):
    """
    北京市公共资源交易服务平台 (Beijing Public Resources Trading Service Platform)
    https://ggzyfw.beijing.gov.cn/index.html

    Crawls five announcement list sections (tender plan / tender announcement /
    winning-candidate publicity / award announcement / contract publicity) and
    yields one item per announcement, carrying the detail page's raw HTML.

    In incremental mode (``full_dose=False``) a section stops as soon as an
    announcement published before today is encountered.
    """
    name = 'BeiJingShiGongGongZiYuanJiaoYiFuWuPingTai'
    name_zh = "北京市公共资源交易服务平台"
    province = "北京"
    # allowed_domains = ['ccgp-beijing.gov.cn']

    start_urls = ['https://ggzyfw.beijing.gov.cn/jyxxggjtbyqs/index.html']

    # Upper bound on list pages walked per section in a full crawl
    # (same limit as the original hard-coded page_number = 50).
    MAX_PAGES = 50

    # List-section entry URL -> announcement-type label stored on each item.
    # A dict (instead of an if/elif chain) guarantees every URL has a label
    # and avoids accidentally carrying a stale label to an unmatched URL.
    TYPE_MAP = {
        'https://ggzyfw.beijing.gov.cn/jyxxgcjszbjh/index.html': '招标计划',      # tender plan
        'https://ggzyfw.beijing.gov.cn/jyxxggjtbyqs/index.html': '招标公告',      # tender announcement
        'https://ggzyfw.beijing.gov.cn/jyxxzbhxrgs/index.html': '中标候选人公示',  # winning-candidate publicity
        'https://ggzyfw.beijing.gov.cn/jyxxzbgg/index.html': '中标公告',          # award announcement
        'https://ggzyfw.beijing.gov.cn/jyxxgcjshtgs/index.html': '合同公示',       # contract publicity
    }

    def __init__(self, full_dose=True):
        # full_dose=True -> crawl everything; False -> incremental, today only.
        super(BeiJingShiGongGongZiYuanJiaoYiFuWuPingTaiSpider, self).__init__(full_dose)

    def start_requests(self):
        """Issue one seed request per list section; pagination is handled in
        :meth:`first_parse` via the ``first_url`` / ``page`` meta keys."""
        for list_url, type_name in self.TYPE_MAP.items():
            yield scrapy.Request(
                url=list_url,
                method="get",
                meta={
                    'first_url': list_url,
                    # Flipped to True once an announcement older than today is
                    # seen in incremental mode; ends pagination for the section.
                    "need_break": False,
                    'type': type_name,
                    'page': self.MAX_PAGES,
                },
                callback=self.first_parse,
            )

    def first_parse(self, response):
        """Walk the section's list pages with blocking ``requests`` calls and
        yield one populated item per announcement detail page.

        Pagination scheme on this site: page 1 is ``index.html``, page N (N>1)
        is ``index_N.html``.

        NOTE(review): the scrapy Response body itself is unused — page 1 is
        re-fetched via ``requests`` below, preserving the original behavior.
        """
        # Fix: the original sent the UA string under the bogus header name
        # 'USER_AGENT'; the real HTTP header is 'User-Agent'.
        headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1"
        }
        if response.meta['need_break']:
            return
        # Silence urllib3's InsecureRequestWarning once (verify=False below).
        requests.packages.urllib3.disable_warnings()
        page_number = 1
        while page_number < int(response.meta['page']):
            if page_number == 1:
                list_url = response.meta['first_url']
            else:
                list_url = response.meta['first_url'].replace(
                    'index.html', 'index_%d.html' % page_number)
            list_resp = requests.get(url=list_url, headers=headers, verify=False)
            list_resp.encoding = 'utf-8'
            sel = Selector(text=list_resp.text)
            for li in sel.xpath('//div[@class="content-list"]/ul/li'):
                origin_url = 'https://ggzyfw.beijing.gov.cn' + li.xpath('./a/@href')[0].root
                title = li.xpath('./a/@title')[0].root
                release_time = li.xpath('./div/p')[0].xpath('string(.)').extract()[0]
                detail_resp = requests.get(url=origin_url, headers=headers, verify=False)
                detail_resp.encoding = 'utf-8'
                # Only these three sections prefix the entry with a 【district】 tag.
                if response.meta['type'] in ('招标公告', '中标候选人公示', '中标公告'):
                    city = re.findall(
                        r'【(.*?)】',
                        li.xpath('./a/span/p').xpath('string(.)').extract()[0])[0]
                else:
                    city = '北京'
                item = BeiJingShiGongGongZiYuanJiaoYiFuWuPingTaiItem()
                item['origin_url'] = origin_url                      # source page URL
                item['announcement_title'] = title                   # announcement title
                item['project_area'] = city                          # district
                item['release_time'] = release_time                  # publication date
                item['announcement_type'] = response.meta['type']    # announcement type
                item['html'] = detail_resp.text                      # raw detail-page HTML
                item['source_type'] = '北京市公共资源交易服务平台'
                item['province'] = '北京'
                item['is_parsed'] = 0
                # Incremental mode: any entry not published today ends this section.
                if not self.full_dose and item['release_time'] != get_current_date():
                    response.meta['need_break'] = True
                else:
                    yield item
            self.logger.info('finished list page %d of %s', page_number, response.meta['type'])
            if response.meta['need_break']:
                break
            page_number += 1