# -*- coding: utf-8 -*-
import time
import requests
import re
import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.inner_utils.standardize_field_utils import check_city_field, check_time_field
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.xian.items import ShanXiGongGongZiYuanJiaoYiZhongXinItem
from commonresources.spiders.basespider import BaseSpider


class ShanXiGongGongZiYuanJiaoYiZhongXinSpider(BaseSpider):
    """Spider for the Shaanxi Public Resource Trading Center (www.sxggzyjy.cn).

    Crawls every announcement category in ``CATEGORY_MAP``, yielding one
    detail-page request per list row.  In incremental mode (``full_dose``
    falsy) a category stops as soon as a row dated before today is seen.
    """

    name = "ShanXiGongGongZiYuanJiaoYiZhongXin"
    name_zh = "陕西公共资源交易中心"
    province = "陕西"
    allowed_domains = ['www.sxggzyjy.cn']

    # Trailing 12-digit category code of a list-page URL, mapped to the
    # (announcement_type, project_type) labels attached to each item.
    # Insertion order matches the original hand-written URL list.
    CATEGORY_MAP = {
        "001001001001": ("招标/资审公告", "工程建设项目招标投标"),
        "001001001002": ("澄清/变更公告", "工程建设项目招标投标"),
        "001001001004": ("流标/终止公告", "工程建设项目招标投标"),
        "001001001005": ("中标候选人公示", "工程建设项目招标投标"),
        "001001001003": ("中标/成交公示", "工程建设项目招标投标"),
        "001001002001": ("交易公告", "土地使用权和矿业权出让"),
        "001001002003": ("变更公告", "土地使用权和矿业权出让"),
        "001001002002": ("中标/成交公示", "土地使用权和矿业权出让"),
        "001001003001": ("交易公告", "国有产权交易"),
        "001001003002": ("中标/成交公示", "国有产权交易"),
        "001001004001": ("采购公告", "政府采购"),
        "001001004002": ("更正公告", "政府采购"),
        "001001004003": ("结果公告", "政府采购"),
        "001001004004": ("废标/终止公告", "政府采购"),
        "001001006001": ("招标公告", "药品、医用耗材及二类疫苗"),
        "001001006002": ("通知", "药品、医用耗材及二类疫苗"),
        "001001006003": ("药品公示公告", "药品、医用耗材及二类疫苗"),
        "001001006004": ("耗材公示公告", "药品、医用耗材及二类疫苗"),
        "001001008001": ("交易公告", "排污权交易"),
        # NOTE(review): "排污权交易标" kept verbatim from the old code; looks
        # like a typo for "排污权交易" — confirm against stored data first.
        "001001008002": ("中标/成交公示", "排污权交易标"),
        "001001013001": ("交易公告", "煤炭产能指标交易"),
        "001001013002": ("中标/成交公示", "煤炭产能指标交易"),
        "001001014001": ("需求公告", "省天然气合同签订平台"),
        "001001014002": ("结果公示", "省天然气合同签订平台"),
        "001001012001": ("交易公告", "其他"),
        "001001012002": ("中标/成交公示", "其他"),
    }

    def __init__(self, full_dose=True):
        # Category list-page roots, derived from CATEGORY_MAP so the URL list
        # and the label table cannot drift apart.  A code such as
        # "001001001001" expands to ".../jydt/001001/001001001/001001001001/".
        self.base_url = [
            f"http://www.sxggzyjy.cn/jydt/001001/{code[:9]}/{code}/"
            for code in self.CATEGORY_MAP
        ]
        super(ShanXiGongGongZiYuanJiaoYiZhongXinSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict  # field-name mapping used when storing

    def parse1(self, response):
        # Intentionally empty; presumably required by BaseSpider's
        # interface — TODO confirm before removing.
        pass

    @property
    def fake_headers(self):
        """Browser-like headers sent when following pagination links."""
        return {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
        }

    def start_requests(self):
        """Seed one request per category.

        NOTE(review): crawling has always started at page 2 — confirm page 1
        is deliberately skipped (the category root URL may serve it).
        """
        page = 2
        for base in self.base_url:
            yield scrapy.Request(
                url=f"{base}{page}.html",
                callback=self.handle_response,
                dont_filter=True,
                meta={"page": page, "need_break": False},
            )

    def handle_response(self, response):
        """Parse one list page.

        Yields one detail-page request per row (category labels, title, date
        and a fresh item object travel in ``meta``) and, unless the
        incremental cut-off was hit, one request for the next page of the
        *same* category.
        """
        url = response.url
        # The 12-digit path segment identifies the category.
        code_match = re.search(r"/(\d{12})/", url)
        announcement_type, project_type = self.CATEGORY_MAP.get(
            code_match.group(1) if code_match else "", ("", ""),
        )
        page = response.meta["page"]
        # '//*[@id="index"]' holds text like "2/37"; the part after the first
        # "/" is this category's total page count.
        paging_text = response.xpath('//*[@id="index"]/text()').extract()[0]
        total_pages = int(paging_text.split('/', 2)[1])
        need_break = False
        # Rows: <li><a href=...>title</a><span>date</span></li>
        for row in response.xpath('//*[@id="categorypagingcontent"]/ul/li'):
            release_time = row.xpath("./span/text()").extract()[0]
            # Incremental mode: a row older than today ends this category.
            # (Lexicographic date compare — assumes zero-padded ISO-style
            # dates from get_current_date(); TODO confirm format.)
            if release_time < get_current_date() and not self.full_dose:
                need_break = True
                continue
            # Bug fix: build a fresh meta dict per request.  The old code
            # mutated one shared dict across loop iterations, so every
            # still-pending request carried the fields of the last row.
            title = str(row.xpath("./a/text()").extract()[0])
            meta = {
                "announcement_type": announcement_type,
                "project_type": project_type,
                "release_time": release_time,
                "origin_url": "http://www.sxggzyjy.cn" + row.xpath("./a/@href").extract()[0],
                "announcement_title": re.sub(r"[\r\n\t]", "", title),
                "item": ShanXiGongGongZiYuanJiaoYiZhongXinItem(),
            }
            yield scrapy.Request(url=meta["origin_url"], callback=self.parse_item_new, meta=meta)
        if not need_break and page < total_pages:
            # Bug fix: follow only this category's next page.  The old code
            # re-queued all 26 categories from every response (exponential
            # fan-out, only tamed by the dedup filter) and tested
            # "page <= total" before incrementing, requesting one page past
            # each category's end.
            yield scrapy.Request(
                url=re.sub(r"\d+\.html$", f"{page + 1}.html", url),
                callback=self.handle_response,
                headers=self.fake_headers,
                meta={"page": page + 1, "need_break": False},
            )