# -*- coding: utf-8 -*-
import time
import requests
import re
import scrapy
from spidertools.utils.time_utils import get_current_date
from commonresources.inner_utils.standardize_field_utils import check_city_field, check_time_field
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.xian.items import ShanXiShengCaiGouYuZhaoBiaoWangItem
from commonresources.spiders.basespider import BaseSpider


class ShanXiShengCaiGouYuZhaoBiaoWangSpider(BaseSpider):
    """Spider for the Shaanxi Province Procurement & Bidding site (bulletin.sntba.com).

    Crawls five announcement categories and, per category, walks the paginated
    listing, yielding one detail-page request per row. Detail pages are handled
    by ``parse_item_new`` — assumed to be provided by ``BaseSpider`` (not
    visible in this file; TODO confirm).
    """

    name = "ShanXiShengCaiGouYuZhaoBiaoWang"
    name_zh = "陕西省采购与招标网"
    province = "陕西"
    allowed_domains = ['bulletin.sntba.com']

    # Listing-URL fragment -> announcement-type label. The labels are runtime
    # data stored into items, so they stay in the site's original language.
    ANNOUNCEMENT_TYPES = (
        ("qualify.html", "资格预审公告"),      # pre-qualification announcement
        ("bulletin.html", "招标公告"),         # tender announcement
        ("candidate.html", "中标候选人公示"),  # winning-candidate publicity
        ("result.html", "中标结果公示"),       # winning-result publicity
        ("change.html", "更正公告公示"),       # correction announcement
    )

    def __init__(self, full_dose=True):
        # Category listing URLs; pagination appends "<page>.html" — the site
        # really does expect the trailing ".html" after the page number.
        self.base_url = [
            "http://bulletin.sntba.com/xxfbcmses/search/qualify.html?searchDate=1996-05-27&dates=300&word=&categoryId=92&industryName=&area=&status=&publishMedia=&sourceInfo=&showStatus=&page=",
            "http://bulletin.sntba.com/xxfbcmses/search/bulletin.html?searchDate=1996-05-27&dates=300&word=&categoryId=88&industryName=&area=&status=&publishMedia=&sourceInfo=&showStatus=&page=",
            "http://bulletin.sntba.com/xxfbcmses/search/candidate.html?searchDate=1996-05-27&dates=300&word=&categoryId=91&industryName=&area=&status=&publishMedia=&sourceInfo=&showStatus=&page=",
            "http://bulletin.sntba.com/xxfbcmses/search/result.html?searchDate=1996-05-27&dates=300&word=&categoryId=90&industryName=&area=&status=&publishMedia=&sourceInfo=&showStatus=&page=",
            "http://bulletin.sntba.com/xxfbcmses/search/change.html?searchDate=1996-05-27&dates=300&word=&categoryId=89&industryName=&area=&status=&publishMedia=&sourceInfo=&showStatus=&page=",
        ]

        super().__init__(full_dose)
        self.convert_dict = convert_dict  # used when persisting items

    def parse1(self, response):
        # Unused hook kept for interface compatibility with BaseSpider.
        pass

    @property
    def fake_headers(self):
        """Browser-like headers so the site does not reject the crawler."""
        return {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
        }

    @staticmethod
    def _strip_layout(text):
        """Drop CR/LF/TAB layout characters the site embeds in cell text."""
        return str(text).replace("\r", "").replace("\n", "").replace("\t", "")

    def start_requests(self):
        """Request page 1 of every category listing (built from self.base_url
        instead of duplicating the URL literals)."""
        page = 1
        for base in self.base_url:
            yield scrapy.Request(
                url=f"{base}{page}.html",
                callback=self.handle_response,
                headers=self.fake_headers,  # consistent with pagination requests
                dont_filter=True,
                meta={"page": page, "need_break": False},
            )

    def handle_response(self, response):
        """Parse one listing page: yield a detail request per row, then the next page.

        Fixes over the previous version:
        * each detail request gets its own meta dict — the old code reused one
          mutable dict, so all pending requests saw the last row's values;
        * only the current category is paginated — the old code re-requested
          every category from every response, applying this category's page
          count to all of them;
        * pagination stops at the last page instead of one past it.
        """
        url = response.url
        # Categories are distinguished by a path fragment of the listing URL.
        announcement_type = next(
            (label for fragment, label in self.ANNOUNCEMENT_TYPES if fragment in url),
            None,
        )

        page = response.meta["page"]
        # Total page count is rendered as the first <label> of the pager.
        total_pages = int(response.xpath('/html/body/div[2]/label[1]/text()').extract()[0])

        need_break = False
        for tr in response.xpath('/html/body/table//tr')[1:]:  # row [0] is the header
            release_time = self._strip_layout(tr.xpath("./td[5]/text()").extract()[0])
            # Dates appear to be "YYYY-MM-DD"-prefixed strings, so lexicographic
            # comparison matches chronological order (assumes get_current_date()
            # returns the same format — TODO confirm). In incremental mode
            # (full_dose=False) a row older than today ends pagination.
            if release_time < get_current_date() and not self.full_dose:
                need_break = True
                continue
            # Detail links are javascript:urlOpen('<real url>') wrappers.
            origin_url = str(tr.xpath(".//a/@href").extract()[0]) \
                .replace("javascript:urlOpen('", "").replace("')", "")
            meta = {
                "release_time": release_time,
                "origin_url": origin_url,
                "announcement_title": self._strip_layout(tr.xpath("./td[1]/a/text()").extract()[0]),
                "item": ShanXiShengCaiGouYuZhaoBiaoWangItem(),
            }
            if announcement_type is not None:
                meta["announcement_type"] = announcement_type
            yield scrapy.Request(url=origin_url, callback=self.parse_item_new, meta=meta)

        if not need_break and page < total_pages:
            next_page = page + 1
            # Re-derive this category's base URL: everything up to and
            # including the "page=" query parameter.
            base = url[: url.rindex("page=") + len("page=")]
            yield scrapy.Request(
                url=f"{base}{next_page}.html",
                callback=self.handle_response,
                headers=self.fake_headers,
                meta={"page": next_page, "need_break": False},
            )