# -- coding: utf-8 --
import re
import json
import scrapy
from spidertools.utils.time_utils import get_current_date
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.hubei.items import HuBeiShengGongGongZiYuanDianZiJiaoYiFuWuXiTongItem
from commonresources.spiders.basespider import BaseSpider


class HuBeiShengGongGongZiYuanDianZiJiaoYiFuWuXiTongSpider(BaseSpider):
    """
        湖北省公共资源电子交易服务系统     https://www.hbggzyfwpt.cn/

    Scrapes tender announcements ("招标公告") and award announcements
    ("中标公告") from the Hubei public-resource electronic trading platform.
    Flow: POST listing pages -> follow each row's detail page to read the
    hidden projectCode -> POST the Ajax detail endpoints -> yield one item
    per JSON row.
    """

    name = 'HuBeiShengGongGongZiYuanDianZiJiaoYiFuWuXiTong'
    name_zh = "湖北省公共资源电子交易服务系统"
    province = "湖北"
    allowed_domains = ['www.hbggzyfwpt.cn']

    # Listing endpoints: tender announcements / award announcements.
    LIST_URLS = ["https://www.hbggzyfwpt.cn/jyxx/jsgcZbgg",
                 "https://www.hbggzyfwpt.cn/jyxx/jsgcZbjggs"]

    # Ajax detail endpoints, queried with the projectCode taken from a
    # detail page (tender detail / award detail).
    DETAIL_AJAX_URLS = ["https://www.hbggzyfwpt.cn/jyxxAjax/jsgcZbggLiDetail",
                        "https://www.hbggzyfwpt.cn/jyxxAjax/jsgcZbjgDetail"]

    # Pre-compiled (hoisted out of parse_3) pattern for the hidden
    # projectCode <input> on detail pages.
    _PROJECT_CODE_RE = re.compile(
        r'<input type="hidden" id="projectCode" name="projectCode" value="(.*?)">')

    def __init__(self, full_dose=True):
        # Signature kept unchanged for compatibility with existing callers;
        # BaseSpider interprets `full_dose`.
        self.base_url = list(self.LIST_URLS)
        super(HuBeiShengGongGongZiYuanDianZiJiaoYiFuWuXiTongSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict  # conversion table used when storing items

    def parse(self, response):
        """Unused; present only to satisfy the abstract Spider.parse contract."""
        pass

    @property
    def fake_headers(self):
        """Browser-like headers used to avoid trivial User-Agent blocking."""
        return {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
        }

    def start_requests(self):
        """POST the first 10 result pages of both listing endpoints."""
        # range(1, 11) replaces the dead `page = 1` plus manual `page += 1`.
        for page in range(1, 11):
            form_data = {
                'currentPage': str(page),
                'area': '000',
                'industriesTypeCode': '0',
                'scrollValue': '0',
                'bulletinName': '',
                'publishTimeType': '3',
                'publishTimeStart': '',
                'publishTimeEnd': ''
            }
            for url in self.LIST_URLS:
                yield scrapy.FormRequest(
                    url=url, callback=self.handle_response, formdata=form_data,
                    headers=self.fake_headers,  # now consistent with detail requests
                    dont_filter=True, meta={"page": page, "need_break": False})

    def handle_response(self, response):
        """Parse one listing page and request every announcement's detail page."""
        announcement_type = '中标公告' if 'Zbjggs' in response.url else '招标公告'
        for tr in response.xpath('//div[@class="newListwenzi"]//tr'):
            title = tr.xpath("./td/a/@title").extract_first()
            href = tr.xpath("./td/a/@href").extract_first()
            if not href:
                # Header/spacer rows have no <a>; the original extract()[0]
                # would have raised IndexError here.
                continue
            release_time = tr.xpath("./td[2]/text()").extract_first(default="")
            # Build a FRESH meta dict per row. The original mutated one shared
            # dict across iterations and relied on Request's shallow meta copy.
            item_com = {
                "announcement_type": announcement_type,
                "announcement_title": title,
                "origin_url": "https://www.hbggzyfwpt.cn/" + href,
                # one pass strips spaces, tabs, CR and LF from the date cell
                "release_time": "".join(release_time.split()),
            }
            yield scrapy.Request(url=item_com["origin_url"], callback=self.parse_3,
                                 headers=self.fake_headers, meta=item_com)

    def parse_3(self, response):
        """Extract the projectCode from a detail page and query both Ajax endpoints."""
        codes = self._PROJECT_CODE_RE.findall(response.text)
        if not codes:
            # Page layout changed or no hidden input; the original findall()[0]
            # would have raised IndexError.
            return
        form_data = {'projectCode': codes[0]}
        for url in self.DETAIL_AJAX_URLS:
            # Copy only the fields parse_4 needs into a fresh meta dict.
            item_com = {key: response.meta[key]
                        for key in ("announcement_title", "origin_url",
                                    "announcement_type", "release_time")}
            yield scrapy.FormRequest(url=url, callback=self.parse_4,
                                     headers=self.fake_headers, dont_filter=True,
                                     meta=item_com, formdata=form_data)

    def parse_4(self, response):
        """Yield one item per row of the Ajax JSON detail response."""
        payload = json.loads(response.text)
        # Tolerate error payloads that lack the 'list' key (original raised KeyError).
        for row in payload.get('list', []):
            item = {
                "announcement_title": response.meta["announcement_title"],
                "origin_url": response.meta["origin_url"],
                "announcement_type": response.meta["announcement_type"],
                "release_time": response.meta["release_time"],
                "source_type": '湖北省公共资源电子交易服务系统',
                "is_parsed": 0,
                # The API is inconsistent about the casing of the content key.
                "html": (row['bulletinContent'] if 'bulletinContent' in row
                         else row['bulletincontent']),
            }
            yield item
