import json
import re

import requests
import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.zhejiang.items import ZheJiangShengGongGongZiYuanJiaoYiPingTaiItem
from commonresources.spiders.basespider import BaseSpider


class XXXXXXSpider(BaseSpider):
    """Spider template for a provincial public-resource trading platform.

    Fill in the placeholders below when creating a concrete spider:

        Home page URL:
        Detail page URL:
    """
    name = ""                 # Scrapy spider name (unique per project)
    name_zh = ""              # human-readable Chinese name of the target site
    province = ""             # province this spider covers
    allowed_domains = [""]    # restrict crawling to the target site's domain(s)
    start_urls = [""]         # entry-point URL(s) for the crawl

    def __init__(self, full_dose=False):
        # full_dose: when True, perform a full crawl instead of an incremental one
        # (semantics defined by BaseSpider — confirm against its implementation).
        super().__init__(full_dose)
        # Mapping used when converting scraped items for storage.
        self.convert_dict = convert_dict

    def parse(self, response):
        """Parse the listing page; fill in per-site extraction logic."""
        pass

    # NOTE: override start_requests() only when custom request construction is
    # required (e.g. POST form/payload requests). When it is not used, it must
    # not be defined as an empty override, otherwise the crawl will fail.
    # Tip for cleaning extracted text:
    #     .replace("\r", "").replace("\n", "").replace(" ", "")

    def handle_response(self, response):
        """Handle the raw listing response; fill in per-site logic."""
        pass

    def faker_headers(self):
        """Return request headers; add any additional fields as needed."""
        return {

        }

    def faker_formdata(self):
        """Return form data for POST (form-encoded) requests."""
        return {

        }

    def faker_payloaddata(self):
        """Return payload data for POST (JSON body) requests."""
        return {

        }

    def handle_detail_page(self, response):
        """Parse a single announcement's detail page; fill in per-site logic."""
        pass

    def handle_pdf_content(self, response):
        """Multi-hop redirect: follow to the PDF page and extract its content."""
        pass

    def handle_jpg_content(self, response):
        """Multi-hop redirect: follow to the JPG page and extract its content."""
        pass
