# -*- coding: utf-8 -*-
import time
import requests
import re
import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.inner_utils.standardize_field_utils import check_city_field, check_time_field
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.shanxi.items import ShanXiShengJiaoTongYunShuTingItem
from commonresources.spiders.basespider import BaseSpider


class ShanXiShengJiaoTongYunShuTingSpider(BaseSpider):
    """Spider for the Shanxi Provincial Department of Transportation
    (山西省交通运输厅): crawls the tender-announcement (zbgg) and
    tender-result (zbjggs) listing sections and follows each row to its
    detail page.
    """

    name = "ShanXiShengJiaoTongYunShuTing"
    name_zh = "山西省交通运输厅"
    province = "山西"
    allowed_domains = ['jtyst.shanxi.gov.cn']

    def __init__(self, full_dose=True):
        """
        :param full_dose: True for a full crawl of every page; False for an
            incremental crawl that stops at entries older than today.
        """
        # Paginated listing prefixes; "<page>.jhtml" is appended per page.
        self.base_url = ["http://jtyst.shanxi.gov.cn/zbgg/index_",
                         "http://jtyst.shanxi.gov.cn/zbjggs/index_"]
        # BUG FIX: dict.update() returns None, so the original code stored
        # None here and then re-assigned the bare ``convert_dict`` after
        # super().__init__(), silently losing the 'com_num' mapping.
        # Build the merged mapping (used when storing items) in one step.
        self.convert_dict = {'com_num': "索引号", **convert_dict}
        super(ShanXiShengJiaoTongYunShuTingSpider, self).__init__(full_dose)

    def parse1(self, response):
        # Unused hook; kept for interface compatibility with BaseSpider.
        pass

    @property
    def fake_headers(self):
        """Browser-like headers for follow-up listing-page requests."""
        return {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
        }

    def start_requests(self):
        """Start crawling at page 0 of each listing section.

        The section prefix is carried in ``meta['base']`` so that
        ``handle_response`` can build the next-page URL for the correct
        section (the original code concatenated ``self.base_url`` — a
        list — with a string, which raises TypeError).
        """
        page = 0
        for base in self.base_url:
            yield scrapy.Request(url=f"{base}{page}.jhtml",
                                 callback=self.handle_response,
                                 dont_filter=True,
                                 meta={"page": page, "need_break": False,
                                       "base": base})

    def handle_response(self, response):
        """Parse one listing page.

        Yields a detail-page request per table row, then the next listing
        page unless the crawl is incremental and an entry older than
        today has been reached.
        """
        page = response.meta["page"]
        base = response.meta.get("base", self.base_url[0])
        # The 5th page-number link holds the highest page index.
        max_page = int(response.xpath("//a[@class='Num'][5]/text()").extract()[0])
        rows = response.xpath('//div[@class="dataBox"]//tr')[1:]  # skip header row
        for row in rows:
            item_com = {}
            item_com["release_time"] = row.xpath("./td[last()]/text()").extract()[0]
            if item_com["release_time"] < get_current_date() and not self.full_dose:
                # Incremental mode: listing is ordered newest-first, so an
                # older entry means everything after it is already seen.
                response.meta["need_break"] = True
                break
            item_com["com_num"] = row.xpath('./td[1]//text()').extract()[0]
            item_com["origin_url"] = row.xpath("./td[2]/a/@href").extract()[0]
            item_com["announcement_title"] = row.xpath("./td[2]/ul/li/text()").extract()[0]
            item_com["announcement_type"] = row.xpath("./td[2]/ul/li[3]/text()").extract()[0]
            item_com["item"] = ShanXiShengJiaoTongYunShuTingItem()
            yield scrapy.Request(url=item_com["origin_url"],
                                 callback=self.parse_item_new,
                                 meta=item_com)
        if not response.meta["need_break"] and page <= max_page:
            page += 1
            # BUG FIX: build the next-page URL from the section prefix in
            # meta; the original ``self.base_url + f"{page}"`` added a
            # string to a list and would raise TypeError.
            yield scrapy.Request(url=f"{base}{page}.jhtml",
                                 callback=self.handle_response,
                                 headers=self.fake_headers,
                                 meta={"page": page, "need_break": False,
                                       "base": base})