import scrapy
from spidertools.utils.time_utils import get_current_date
import re

from commonresources.spider_items.beijing.items import BeiJingShiGongGongZiYuanJiaoYiFuWuPingTaiItem
from commonresources.spiders.basespider import BaseSpider

class BeiJingShiGongGongZiYuanJiaoYiFuWuPingTaiSpider(BaseSpider):
    """
    Spider for the Beijing Public Resource Trading Service Platform
    (北京市公共资源交易服务平台), https://ggzyfw.beijing.gov.cn/index.html

    Crawl flow:
        parse              -> first top-level category tab (工程建设 only)
        handle_response    -> announcement-type links under that tab
        handle_response2   -> optional project-type filter list (ul#extId);
                              branches to handle_response4 when present,
                              handle_response3 otherwise
        handle_response3/4 -> paginated result lists; each row becomes a
                              Request handled by self.parse_item_new
                              (defined on BaseSpider)
    """
    # NOTE(review): the trailing "1111111" looks like leftover debugging, but
    # the spider is addressed by this exact name elsewhere, so it is kept.
    name = 'BeiJingShiGongGongZiYuanJiaoYiFuWuPingTai1111111'
    name_zh = "北京市公共资源交易服务平台"
    province = "北京"
    # allowed_domains = ['ccgp-beijing.gov.cn']

    start_urls = ['https://ggzyfw.beijing.gov.cn/jyxxggjtbyqs/index.html']

    def __init__(self, full_dose=False):
        """full_dose=True re-crawls everything; False (default) is the
        incremental mode that stops at entries older than today."""
        super().__init__(full_dose)

    def parse(self, response):
        """Entry point: follow the first top-level category tab (工程建设)."""
        objs = response.xpath('//div[@class="panel-search"]/ul[@class="panel-tab"]/li[1]')
        for obj in objs:
            href = 'https://ggzyfw.beijing.gov.cn/' + obj.xpath('./@data-href').extract_first()
            # Category label (公告公示类型); informational only — the meta value
            # below is hard-coded because only li[1] (工程建设) is followed.
            public_announcement_type = obj.xpath('./div/text()').extract_first()
            if public_announcement_type:
                self.logger.debug('public_announcement_type: %s', public_announcement_type)
            yield scrapy.Request(
                url=href,
                callback=self.handle_response,
                headers=self.fake_headers(),
                dont_filter=True,
                errback=self.handle_error,
                meta={
                    "need_break": False,
                    "public_announcement_type": '工程建设',
                },
            )

    def handle_response(self, response):
        """Follow every announcement-type (公告类型) link on the category page."""
        objs = response.xpath('//ul[@class="panel-tab2 clearfix"]/li/ul[@class="panel-search2"]/li/a')
        for obj in objs:
            rel_href = obj.xpath('./@href').extract_first()
            href = "https://ggzyfw.beijing.gov.cn" + rel_href  # listing page URL
            announcement_type = obj.xpath('./text()').extract_first()  # 公告类型
            self.logger.debug('listing url: %s (%s)', href, announcement_type)
            yield scrapy.Request(
                url=href,
                callback=self.handle_response2,
                headers=self.fake_headers(),
                dont_filter=True,
                errback=self.handle_error,
                meta={
                    'href3': href,
                    "need_break": False,
                    "page": 1,
                    "announcement_type": announcement_type,
                    "public_announcement_type": response.meta['public_announcement_type'],
                    # strip trailing "/index.html" (11 chars) -> category slug for &c3=
                    "c3": rel_href[0:-11],
                    # strip trailing ".html" -> base for "_<page>.html" pagination
                    "o_url": href[0:-5],
                },
            )

    def handle_response2(self, response):
        """Branch on the optional project-type filter list (ul#extId).

        When the filter exists, build one queryContent.jspx search URL per
        project type and paginate via handle_response4; otherwise re-fetch the
        plain listing page and paginate via handle_response3.
        """
        objs = response.xpath('//ul[@id="extId"]/li')
        announcement_type = response.meta['announcement_type']
        public_announcement_type = response.meta['public_announcement_type']
        if objs:
            for obj in objs:
                val = obj.xpath('./@val').extract_first()
                if not val:
                    continue
                c3 = '&c3=' + response.meta['c3']
                c2 = 'c2=jyxx' + objs.xpath('//ul[@class="panel-tab2 clearfix"]/li[2]/@id').extract_first()
                c4 = '&c4=&e=' + val
                # channelId only appears as a hidden form input, not in the DOM
                # xpath tree we selected from, so pull it from the raw HTML.
                channelId = re.findall(r'<input type="hidden" id="channelId" name="channelId" value="(.*?)"/>', response.text)[0]
                # e.g. https://ggzyfw.beijing.gov.cn/cmsbj/queryContent_jyxx.jspx?c1=jyxx&c2=jyxxgcjs&c3=jyxxggjtbyqs&c4=&e=A01&channelId=121&q=
                # NOTE(review): only the last 4 chars of c2 are used, matching
                # the original behavior — confirm against the live site.
                href = "https://ggzyfw.beijing.gov.cn/cmsbj/queryContent.jspx?c1=jyxx&" + c2[-4:] + c3 + c4 + "&channelId=" + channelId + "&q="
                o_url = "https://ggzyfw.beijing.gov.cn/cmsbj/queryContent"
                info_type = obj.xpath('./text()').extract_first()  # 项目类型 (project type)
                self.logger.debug('project-type search url: %s (%s)', href, info_type)
                yield scrapy.Request(
                    url=href,
                    callback=self.handle_response4,
                    headers=self.fake_headers(),
                    dont_filter=True,
                    errback=self.handle_error,
                    meta={
                        "need_break": False,
                        "page": 1,
                        "announcement_type": announcement_type,
                        "public_announcement_type": public_announcement_type,
                        'info_type': info_type,
                        'o_url': o_url,
                        'c2': c2[-4:],
                        'c3': c3,
                        'c4': c4,
                        'channelId': channelId,
                    }
                )
        else:
            yield scrapy.Request(
                url=response.meta['href3'],
                callback=self.handle_response3,
                headers=self.fake_headers(),
                dont_filter=True,
                errback=self.handle_error,
                meta={
                    "need_break": False,
                    "announcement_type": announcement_type,
                    "public_announcement_type": public_announcement_type,
                    "o_url": response.meta['o_url'],
                    'info_type': 'none',
                    'page': 1,
                }
            )

    def handle_error(self, failure):
        """Errback: log the failed request together with its crawl context.

        Uses meta.get() because first-level requests (built in parse) carry
        neither announcement_type, info_type nor page — indexing them here
        used to raise KeyError inside the errback itself.
        """
        self.logger.error(repr(failure))
        meta = failure.request.meta
        self.logger.error(
            f"本条目已结束，信息如下:url:{failure.request.url},"
            f"announcement_type:{meta.get('announcement_type')},"
            f"info_type：{meta.get('info_type')}，总页数：{meta.get('page', 0) + 1}")

    def fake_headers(self):
        """Return browser-like request headers for every outgoing request.

        NOTE(review): the Cookie / If-Modified-Since / If-None-Match values
        are hard-coded captures from a 2021 browser session — they may be
        stale; confirm the site still accepts them.
        """
        return {
            "Accept": 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            "Accept-Encoding": 'gzip, deflate, br',
            "Accept-Language": 'zh-CN,zh;q=0.9',
            "Cache-Control": 'max-age=0',
            "Connection": 'keep-alive',
            "Cookie": '_va_ref=%5B%22%22%2C%22%22%2C1612144126%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3Dh9q7ijJ0MHrsAzedvvQaPI26F2Y465ZNfr1NzwBgHvNhCXpwBNMVIiUbUfrkDXne%26wd%3D%26eqid%3Dac4b9bd0001f6ca30000000260175de8%22%5D; _va_ses=*; clientlanguage=zh_CN; _va_id=ee4557c0827bdddc.1612144126.1.1612147966.1612144126.',
            "Host": 'ggzyfw.beijing.gov.cn',
            "If-Modified-Since": 'Mon, 01 Feb 2021 02:34:23 GMT',
            "If-None-Match": '"601768af-62d3"',
            "sec-ch-ua": '"Chromium";v="88", "Google Chrome";v="88", ";Not A Brand";v="99"',
            "sec-ch-ua-mobile": '?0',
            "Sec-Fetch-Dest": 'document',
            "Sec-Fetch-Mode": 'navigate',
            "Sec-Fetch-Site": 'none',
            "Sec-Fetch-User": '?1',
            "Upgrade-Insecure-Requests": '1',
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36',
            }

    def handle_response3(self, response):
        """Parse one page of a plain listing and recurse to the next page.

        The `"页" in response.text` guard detects a real listing page: a
        request past the last page returns a page without the pager text,
        which terminates the recursion.
        """
        if "页" in response.text:
            objs = response.xpath('//div[@class="content clearfix"]/div[@class="content-list"]/ul/li')
            for obj in objs:
                item = dict()
                release_time = obj.xpath('./div[@class="list-times1"]/p/text()').extract_first()
                if release_time:
                    item['release_time'] = release_time  # normal rows carry a single date
                else:
                    # 中标人候选公示 rows carry two dates (list-times2); take the first
                    item['release_time'] = obj.xpath('./div[@class="list-times2"]/p/text()').extract_first()
                if not self.full_dose and item['release_time'] != get_current_date():
                    # Incremental mode: a non-today entry means the rest of this
                    # newest-first list is stale — flag pagination to stop.
                    # (BUG FIX: was `= False`, a no-op, so incremental crawls
                    # never stopped via this flag.)
                    response.meta['need_break'] = True
                else:
                    item['announcement_title'] = obj.xpath('./a/text()').extract_first()  # title
                    item['origin_url'] = "https://ggzyfw.beijing.gov.cn/" + obj.xpath('./a/@href').extract_first()
                    item['announcement_type'] = response.meta['announcement_type']
                    item['public_announcement_type'] = response.meta['public_announcement_type']
                    item['project_title'] = obj.xpath('./a/@title').extract_first()  # project name
                    announcement_title = obj.xpath('./a/text()').extract_first()
                    try:
                        item['project_area'] = re.findall(r'【(.*?)】', announcement_title)[0]  # district
                    except (IndexError, TypeError):
                        item['project_area'] = ''  # title has no 【district】 prefix (or is None)
                    item['info_type'] = response.meta['info_type']
                    item['item'] = BeiJingShiGongGongZiYuanJiaoYiFuWuPingTaiItem()
                    self.logger.debug('item3: %s', item)
                    yield scrapy.Request(url=item['origin_url'],
                                         callback=self.parse_item_new,
                                         meta=item,
                                         dont_filter=True,
                                         )
            if not response.meta['need_break']:
                page = response.meta['page'] + 1
                url3 = response.meta['o_url'] + f"_{page}.html"
                self.logger.debug('next listing page: %s', url3)
                yield scrapy.Request(
                    url=url3,
                    callback=self.handle_response3,
                    headers=self.fake_headers(),
                    dont_filter=True,
                    errback=self.handle_error,
                    meta={
                        "need_break": False,
                        "page": page,
                        "announcement_type": response.meta['announcement_type'],
                        "public_announcement_type": response.meta['public_announcement_type'],
                        "info_type": response.meta['info_type'],
                        "o_url": response.meta['o_url'],
                    }
                )

    def handle_response4(self, response):
        """Parse one page of queryContent.jspx search results and paginate.

        NOTE(review): unlike handle_response3 this path applies no date cut-off,
        so need_break is never set True here and pagination only ends at the
        `"页"` guard — confirm whether incremental filtering was intended.
        """
        if "页" in response.text:
            objs = response.xpath('//div[@class="content clearfix"]/div[@class="content-list"]/ul/li')
            for obj in objs:
                item = dict()
                release_time = obj.xpath('./div[@class="list-times1"]/p/text()').extract_first()
                if release_time:
                    item['release_time'] = release_time  # normal rows carry a single date
                else:
                    # 中标人候选公示 rows carry two dates (list-times2); take the first
                    item['release_time'] = obj.xpath('./div[@class="list-times2"]/p/text()').extract_first()
                item['announcement_title'] = obj.xpath('./a/text()').extract_first()  # title
                item['origin_url'] = "https://ggzyfw.beijing.gov.cn/" + obj.xpath('./a/@href').extract_first()
                item['announcement_type'] = response.meta['announcement_type']
                item['public_announcement_type'] = response.meta['public_announcement_type']
                item['project_title'] = obj.xpath('./a/@title').extract_first()  # project name
                title_text = obj.xpath('./a/text()').extract_first()
                try:
                    item['project_area'] = re.findall(r'【(.*?)】', title_text)[0]  # district
                except (IndexError, TypeError):
                    # BUG FIX: was unguarded, crashing on titles without 【district】;
                    # now consistent with handle_response3.
                    item['project_area'] = ''
                # BUG FIX: a trailing comma made this a 1-tuple, unlike the
                # string stored by handle_response3.
                item['info_type'] = response.meta['info_type']
                item['item'] = BeiJingShiGongGongZiYuanJiaoYiFuWuPingTaiItem()
                self.logger.debug('item4: %s', item)
                yield scrapy.Request(url=item['origin_url'],
                                     callback=self.parse_item_new,
                                     meta=item,
                                     dont_filter=True,
                                     )
            # pagination
            if not response.meta['need_break']:
                page = response.meta['page'] + 1
                # BUG FIX: a trailing comma made `url` a tuple, so
                # scrapy.Request(url=...) raised and pagination never ran.
                url = (response.meta['o_url'] + f"_{page}" + ".jspx?c1=jyxx&"
                       + response.meta['c2'] + response.meta['c3'] + response.meta['c4']
                       + "&channelId=" + response.meta['channelId'] + "&q=")
                self.logger.debug('next search page: %s', url)
                yield scrapy.Request(
                    url=url,
                    callback=self.handle_response4,
                    headers=self.fake_headers(),
                    dont_filter=True,
                    errback=self.handle_error,
                    meta={
                        "need_break": False,
                        "page": page,
                        "announcement_type": response.meta['announcement_type'],
                        "public_announcement_type": response.meta['public_announcement_type'],
                        "info_type": response.meta['info_type'],
                        "o_url": response.meta['o_url'],
                        'c2': response.meta['c2'],
                        'c3': response.meta['c3'],
                        'c4': response.meta['c4'],
                        'channelId': response.meta['channelId'],
                    }
                )