import re

import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.guangdong.items import GuangZhouGongGongZiYuanJiaoYiZhongXinItem
from commonresources.spiders.basespider import BaseSpider


class GuangZhouGongGongZiYuanJiaoYiZhongXinSpider(BaseSpider):
    """
    Spider for the Guangzhou Public Resources Trading Center
    (广州公共资源交易中心).

        Home page: http://ggzy.gz.gov.cn/
        List page: http://ggzy.gz.gov.cn/jyywjsgctlgdzbgg/index.jhtml
    """
    name = "GuangZhouGongGongZiYuanJiaoYiZhongXin"
    name_zh = "广州公共资源交易中心"
    province = "广东"
    allowed_domains = ['ggzy.gz.gov.cn']

    # Site root, used to absolutize relative hrefs scraped from list pages.
    BASE_URL = 'http://ggzy.gz.gov.cn'

    def __init__(self, full_dose=False):
        """
        :param full_dose: when False (default) crawl only announcements
            released today; when True crawl the history back to 2015-12-31.
        """
        super(GuangZhouGongGongZiYuanJiaoYiZhongXinSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict

    def parse(self, response):
        # Required by scrapy.Spider's interface; all real parsing happens in
        # the explicit callbacks below.
        pass

    def start_requests(self):
        # Single seed list page whose sidebar nav links out to every
        # project/announcement category.
        url = 'http://ggzy.gz.gov.cn/jyywjsgcfwjzzbgg/index.jhtml'
        yield scrapy.Request(url=url,
                             callback=self.handle_list_page,
                             dont_filter=True,
                             )

    def handle_list_page(self, response):
        """Walk the sidebar nav and schedule page 1 of every category list."""
        obj_uls = response.xpath('//div[@class="side_nav"]/ul/li')
        for obj_ul in obj_uls:
            project_type = obj_ul.xpath('./a/text()').extract_first()
            if project_type == "发包流程":  # this category is not wanted
                continue
            for obj in obj_ul.xpath('./ul//a'):
                announcement_type = obj.xpath('./text()').extract_first()
                target_url = obj.xpath('./@href').extract_first()
                # Absolutize relative hrefs.
                if self.BASE_URL not in target_url:
                    target_url = self.BASE_URL + target_url
                yield scrapy.Request(url=target_url,
                                     callback=self.handle_response,
                                     dont_filter=True,
                                     meta={
                                         'announcement_type': announcement_type,
                                         'target_url': target_url,
                                         'page': 1,
                                         "page_count": -1,  # -1 = not read yet
                                         'need_break': False,
                                         'project_type': project_type,
                                     })

    def handle_response(self, response):
        """
        Parse one paginated announcement list page: schedule a detail-page
        request per table row, then follow the next page unless a stop
        condition (old/ non-current release date) was hit.
        """
        if response.meta['page_count'] == -1:
            # First page of this category: read the total from the pager.
            # NOTE(review): "共N条记录" is the total *record* count, not the
            # page count, so `page < page_count` below over-estimates the
            # number of pages; in practice pagination is stopped earlier by
            # the date-based need_break flag. \d+ (not \d*) avoids int('')
            # and re.search avoids IndexError when the pager is missing.
            match = re.search(r'共(\d+)条记录', response.text)
            response.meta['page_count'] = int(match.group(1)) if match else 0
        rows = response.xpath('//div[@class="portlet"]/table//tr')
        for row in rows[1:]:  # skip the header row
            release_time = row.xpath('./td[3]/span/text()').extract_first()
            if not release_time:
                # Defensive: a row without a date cell cannot be compared
                # below (None < str would raise TypeError).
                continue
            if not self.full_dose and release_time != get_current_date():
                # Incremental mode: stop as soon as we leave today's items.
                response.meta['need_break'] = True
            elif release_time < "2015-12-31":
                # ISO-formatted dates compare correctly as plain strings;
                # nothing older than the end of 2015 is wanted.
                response.meta['need_break'] = True
            else:
                origin_url = row.xpath('./td[2]/a/@href').extract_first()
                if self.BASE_URL not in origin_url:
                    origin_url = self.BASE_URL + origin_url
                title = "".join(row.xpath('./td[2]/a//text()').extract()) \
                    .strip().replace('\n', '').replace('\t', '').replace(' ', '')
                item = {
                    'release_time': release_time,
                    'announcement_title': title,
                    'item': GuangZhouGongGongZiYuanJiaoYiZhongXinItem(),
                    'announcement_type': response.meta['announcement_type'],
                    'project_type': response.meta['project_type'],
                    'origin_url': origin_url,
                }
                yield scrapy.Request(url=origin_url,
                                     dont_filter=True,
                                     callback=self.parse_item_new,
                                     meta=item)

        if not response.meta['need_break']:
            page = response.meta['page']
            page_count = response.meta['page_count']
            if page < page_count:
                page += 1
                # Pages are index.jhtml, index_2.jhtml, index_3.jhtml, ...
                # Substitute only the index segment of the URL; the previous
                # str.replace(str(page - 1), str(page)) rewrote *every*
                # occurrence of those digits anywhere in the URL.
                url = re.sub(r'index(?:_\d+)?(?=\.jhtml)', f'index_{page}',
                             response.url)
                yield scrapy.Request(url=url,
                                     callback=self.handle_response,
                                     dont_filter=True,
                                     meta={
                                         'announcement_type': response.meta['announcement_type'],
                                         'page': page,
                                         "page_count": page_count,
                                         'need_break': False,
                                         'project_type': response.meta['project_type'],
                                     })


