# -*- coding: utf-8 -*-
# @Time : 2020/11/23 11:22
# @Author : zhangxing
# @File : JiangSuShengZhaoBiaoTouBiaoGongGongFuWuPingTai.py
import scrapy
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.jiangsu.items import JiangSuShengZhaoBiaoTouBiaoGongGongFuWuPingTaiItem
from spidertools.utils.time_utils import get_current_date


class JiangSuShengZhaoBiaoTouBiaoGongGongFuWuPingTaiSpider(scrapy.Spider):
    """江苏省招标投标公共服务平台 (Jiangsu Province Bidding & Tendering Public
    Service Platform) — http://www.jszbtb.com/#/newindex

    Crawl flow:
      1. ``parse``           — fetch the region list, fan out one list request
                               per (bulletin type, region) pair.
      2. ``handle_response`` — walk one list page, request every bulletin
                               detail, follow pagination until exhausted or a
                               stop condition (``need_break``) is hit.
      3. ``parse_html``      — wrap a detail response into an item.
    """
    name = 'JiangSuShengZhaoBiaoTouBiaoGongGongFuWuPingTai'
    name_zh = '江苏省招标投标公共服务平台'
    province = "江苏"
    city = ''
    allowed_domains = ['jszbtb.com']
    start_urls = ['http://api.jszbtb.com/PlatformApi/AdministrantionArea']

    # Browser-like UA sent with every API request (was duplicated inline
    # three times in the original).
    USER_AGENT = (
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 "
        "Core/1.70.3776.400 QQBrowser/10.6.4212.40"
    )
    # Records requested per list page; also used for page-count math.
    PAGE_SIZE = 20

    def __init__(self, full_dose=False, not_full_type=False):
        """
        :param full_dose: crawl the full history when truthy; otherwise only
            records released today (incremental mode).
        :param not_full_type: when truthy, restrict crawling to 招标公告 and
            中标结果公示 instead of all five bulletin types.

        Scrapy ``-a`` command-line arguments arrive as strings, so the two
        flags are coerced to real booleans (``-a full_dose=false`` used to be
        truthy).
        """
        self.browser_cookie = {}
        self.convert_dict = convert_dict
        self.not_full_type = self._to_bool(not_full_type)
        self.full_dose = self._to_bool(full_dose)
        super().__init__()

    @staticmethod
    def _to_bool(value):
        """Coerce a bool-or-string flag to bool, treating common falsy
        spellings ("false", "0", "no", "none", "") as False."""
        if isinstance(value, str):
            return value.strip().lower() not in ('', 'false', '0', 'no', 'none')
        return bool(value)

    def parse(self, response):
        """Fan out one list request per (bulletin type, region).

        ``response`` is the JSON region list from
        PlatformApi/AdministrantionArea.
        """
        # (human-readable type, list endpoint, detail-endpoint path segment)
        bulletin_types = [
            ['资格预审公告', 'http://api.jszbtb.com/DataSyncApi/HomeQulifyBulletin', 'QulifyBulletin'],
            ['招标公告', 'http://api.jszbtb.com/DataSyncApi/HomeTenderBulletin', 'TenderBulletin'],
            ['中标候选人公示', 'http://api.jszbtb.com/DataSyncApi/HomeWinCandidateBulletin', 'WinCandidateBulletin'],
            ['中标结果公示', 'http://api.jszbtb.com/DataSyncApi/HomeWinBidBulletin', 'WinBidBulletin'],
            ['更正公告公示', 'http://api.jszbtb.com/DataSyncApi/AmendBulletin', 'AmendBulletin'],
        ]
        regions = response.json()['datalist']
        for construction_type, url_main, item_url in bulletin_types:
            # "not full type" mode only collects tender announcements and
            # win-result publications.
            if self.not_full_type and construction_type not in ("招标公告", "中标结果公示"):
                continue
            for region in regions:
                city = region['regionname']
                if city == "江苏省":
                    # Province-wide bulletins are tagged as "province level".
                    city = '省级别'
                get_code = region['adcode']
                url = f"{url_main}?PageSize={self.PAGE_SIZE}&CurrentPage=1&RegionCode={get_code}"
                yield scrapy.Request(
                    url=url,
                    callback=self.handle_response,
                    headers={"User-Agent": self.USER_AGENT},
                    meta={
                        'city': city,
                        'construction_type': construction_type,
                        'page': 1,
                        'need_break': False,
                        'page_count': -1,  # -1: total unknown until first page
                        'url_main': url_main,
                        'item_url': item_url,
                        'get_code': get_code,
                    },
                    dont_filter=True,
                )

    def handle_response(self, response):
        """Parse one list page: emit one detail request per bulletin and,
        unless a stop condition was hit, request the next page."""
        payload = response.json()['data']
        meta = response.meta
        if meta['page_count'] == -1:
            total = payload['totalNumber']
            if not total:
                meta['need_break'] = True
                meta['page_count'] = 0
            else:
                # Ceiling division. The original ``int(num // 20) + 1``
                # over-counted by one page whenever the total was an exact
                # multiple of the page size.
                meta['page_count'] = -(-int(total) // self.PAGE_SIZE)
        if meta['page_count']:
            for obj in payload['data']:
                item = dict()
                # create_time looks like "YYYY-MM-DDT..."; keep the date part.
                item['release_time'] = obj['create_time'].split('T')[0]
                if not self.full_dose and item['release_time'] != get_current_date():
                    # Incremental mode: records are date-ordered, so stop
                    # paging once we leave today's records.
                    meta['need_break'] = True
                elif item['release_time'] < "2015-12-31":
                    # Never crawl past the platform's earliest useful data.
                    meta['need_break'] = True
                else:
                    # The title field name differs per bulletin type.
                    if 'publicityName' in obj:
                        item['announcement_title'] = obj['publicityName']
                    elif 'bulletinName' in obj:
                        item['announcement_title'] = obj['bulletinName']
                    else:
                        # Fallback so parse_html never KeyErrors on the title.
                        item['announcement_title'] = ''
                    item['construction_type'] = meta['construction_type']
                    item['city'] = meta['city']
                    bulletin_id = obj['id']
                    # Bug fix: first_url was hard-coded to "WinBidBulletin"
                    # for every bulletin type; use the type-specific path
                    # segment, consistent with origin_url below.
                    item['first_url'] = f"http://www.jszbtb.com/#/bulletindetail/{meta['item_url']}/{bulletin_id}"
                    item['origin_url'] = f"http://api.jszbtb.com/DataSyncApi/{meta['item_url']}/id/{bulletin_id}"
                    yield scrapy.Request(
                        url=item['origin_url'],
                        callback=self.parse_html,
                        headers={"User-Agent": self.USER_AGENT},
                        meta=item,
                        dont_filter=True,
                    )
        if not meta['need_break']:
            page = meta['page'] + 1
            if page <= meta['page_count']:
                url_main = meta['url_main']
                get_code = meta['get_code']
                url = f"{url_main}?PageSize={self.PAGE_SIZE}&CurrentPage={page}&RegionCode={get_code}"
                yield scrapy.Request(
                    url=url,
                    callback=self.handle_response,
                    headers={"User-Agent": self.USER_AGENT},
                    meta={
                        'city': meta['city'],
                        'construction_type': meta['construction_type'],
                        'page': page,
                        'need_break': False,
                        'page_count': meta['page_count'],
                        'url_main': url_main,
                        'item_url': meta['item_url'],
                        'get_code': get_code,
                    },
                    dont_filter=True,
                )

    def parse_html(self, response):
        """Wrap one bulletin detail response into an item for the pipeline."""
        item = JiangSuShengZhaoBiaoTouBiaoGongGongFuWuPingTaiItem()
        item['html'] = response.text
        if "first_url" in response.meta:
            item['first_url'] = response.meta['first_url']
        item["announcement_title"] = response.meta['announcement_title']
        item["release_time"] = response.meta["release_time"]
        item['origin_url'] = response.meta['origin_url']
        item["construction_type"] = response.meta['construction_type']
        item['is_parsed'] = 0  # flag for the downstream HTML parser
        item['source_type'] = self.name_zh
        item['province'] = self.province
        item['project_city'] = response.meta['city']
        yield item
