# -*- coding: utf-8 -*-
import re
import json
import scrapy
import requests
from spidertools.utils.time_utils import get_current_date
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.hunan.items import HuNanShengZhaoBiaoTouBiaoJianGuanWangItem
from commonresources.spiders.basespider import BaseSpider
from scrapy.selector import Selector


class HuNanShengZhaoBiaoTouBiaoJianGuanWangSpider(BaseSpider):
    """
    Spider for the Hunan Provincial Bidding Supervision Network
    (湖南省招标投标监管网, http://bidding.hunan.gov.cn/).

    Crawls the two notice listings (categoryId=90 and categoryId=88),
    pages through the site's JSON list API, and dispatches one request
    per announcement (PDF attachment or HTML detail page).
    """

    name = 'HuNanShengZhaoBiaoTouBiaoJianGuanWang'
    name_zh = "湖南省招标投标监管网"
    province = "湖南"
    allowed_domains = ['bidding.hunan.gov.cn']
    start_urls = ['http://bidding.hunan.gov.cn/bidding/notice?categoryId=90',
                  'http://bidding.hunan.gov.cn/bidding/notice?categoryId=88']

    def __init__(self, full_dose=True):
        super(HuNanShengZhaoBiaoTouBiaoJianGuanWangSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict  # field-name conversion map used at storage time
        self.nodes = []  # total-count strings scraped so far (kept for backward compatibility)

    @property
    def fake_headers(self):
        """Browser-like request headers to avoid trivial User-Agent filtering."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
        }
        return headers

    def parse(self, response):
        """Read the record total from one listing page and paginate its list API.

        Fixes two defects of the original implementation:
        - it accumulated the totals of *all* start pages in ``self.nodes`` and
          then crossed every total with *both* categoryIds, producing duplicate
          and mismatched API requests; each listing page now schedules only its
          own category, taken from the page's URL;
        - the page range was hard-coded to 1..799; it is now derived from the
          advertised record total (the API uses limit=10 per page), so large
          categories are not truncated and small ones are not over-requested.
        """
        sel = Selector(response)
        # The pagination widget shows the record total as "共 N 条".
        totals = []
        for node in sel.xpath('//div[@class="pagination-warpper"]'):
            raw = node.xpath('.//span/text()').extract()[0]
            totals.append(str(raw.replace('共', '').replace("条", "").replace(" ", "")))
        self.nodes.extend(totals)  # preserved: downstream code may inspect self.nodes

        # Pair the total with the category of THIS listing page only.
        match = re.search(r'categoryId=(\d+)', response.url)
        category_id = match.group(1) if match else ""
        for total in totals:
            try:
                page_count = (int(total) + 9) // 10  # ceil(total / 10), 10 rows per page
            except ValueError:
                page_count = 799  # fall back to the old fixed upper bound
            for page in range(1, page_count + 1):
                url = ("http://bidding.hunan.gov.cn/ztb/api/getBiddingList?totalCount=" + str(total) +
                       "&limit=10&page=" + str(page) + "&categoryId=" + category_id + "&areaNo=")
                yield scrapy.Request(url=url, callback=self.handle_response)

    def handle_response(self, response):
        """Parse one JSON page of the list API and emit one detail request per row.

        Scrapy has already downloaded the body, so the JSON is decoded from
        ``response.text`` instead of re-fetching the same URL with a blocking
        ``requests.get`` call (the original double-fetch stalled the reactor
        and doubled the load on the site).
        """
        response_obj = json.loads(response.text)
        rows = response_obj["page"].get('list')
        for row in rows:
            item = {}
            item["announcement_title"] = row['noticeTitle']
            item["release_time"] = row["createTime"][:10]  # keep only the YYYY-MM-DD prefix
            # noticeType containing "88" marks a tender notice; everything else
            # is treated as an award notice (mapping preserved from original).
            if "88" in row["noticeType"]:
                item['announcement_type'] = "招标公告"
            else:
                item['announcement_type'] = "中标公告"
            notice_id = row["id"]
            item["origin_url"] = 'http://bidding.hunan.gov.cn/bidding/notice/' + notice_id + "?isdetail=1"
            # bidFilePath may be missing/null; fall back to the HTML detail page
            # instead of crashing on string concatenation.
            file_path = row.get('bidFilePath') or ''
            url_new = "http://bidding.hunan.gov.cn/ztbPdf/" + file_path
            item["item"] = HuNanShengZhaoBiaoTouBiaoJianGuanWangItem()
            if "project" in url_new:
                # PDF attachment hosted under /ztbPdf/ — download it directly.
                item["file_type"] = 'pdf'
                yield scrapy.Request(url=url_new, callback=self.parse_item_new, meta=item)
            else:
                yield scrapy.Request(url=item["origin_url"], callback=self.parse_item_new, meta=item)
