import time
from datetime import datetime,timedelta

import scrapy
import pydoc

# from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date
from spidertools.utils.time_utils import get_current_date
from spidertools.utils.pinyin_utils import pinyin
# from spidertools.common_pipeline.base_item import BaseItem
# from utils.standardize_field_utils import check_city_field
from commonresources.inner_utils.standardize_field_utils import check_city_field, check_time_field
from commonresources.spider_items.base_item import BaseItem, convert_dict


class BaseSpider(scrapy.Spider):
    """Shared base class for site spiders.

    Subclasses set ``name_zh`` / ``province`` / ``city`` and reuse the
    stop-condition helpers (``check_if_need_break`` / ``deal_if_need_break``)
    plus the item-assembly callbacks (``parse_info`` / ``parse_item_new`` /
    the deprecated ``parse_item``).
    """

    # Placeholder values; concrete spiders override these with real metadata.
    name_zh = '基础爬虫'
    province = "省份"
    city = '城市'

    def __init__(self, full_dose=False, stop_today=False):
        """Configure the crawl mode.

        :param full_dose: truthy -> full crawl covering roughly the last five
            years; falsy -> incremental crawl of today's items only.
        :param stop_today: optional cut-off date string like '2021-01-01';
            when set it takes precedence over ``full_dose``.
        """
        self.convert_dict = convert_dict
        self.full_dose = full_dose
        self.stop_today = stop_today
        super().__init__()

    def check_if_need_break(self, item_day):
        """Return True when pagination should stop at ``item_day``.

        Rules (first match wins):
        - ``stop_today`` set: stop once items are older than that date;
        - ``full_dose``: stop once items are older than about five years;
        - otherwise (incremental): stop as soon as an item is not from today.

        :param item_day: raw date string of the current item; normalised via
            ``check_time_field`` before comparison.
        :return: bool
        """
        item_day = check_time_field(item_day)
        current_day = get_current_date()
        # Comparisons are lexicographic on ISO 'YYYY-MM-DD' strings, which
        # orders the same way as the dates themselves.
        if self.stop_today:  # explicit cut-off date, e.g. '2021-01-01'
            if item_day < self.stop_today:
                return True
        elif self.full_dose:  # full crawl: keep roughly the last five years
            five_years_ago = (datetime.now() - timedelta(days=365 * 5)).date()
            if item_day < str(five_years_ago):
                return True
        elif current_day != item_day:  # incremental crawl: today only
            return True
        return False

    def deal_if_need_break(self, response, item_day):
        """Flag ``response.meta['need_break']`` when crawling should stop.

        Incremental mode stops on any non-today item; full mode stops at
        items dated before 2015-12-31.

        NOTE(review): this method was defined twice with identical bodies;
        the shadowed duplicate has been removed.
        """
        if not self.full_dose and item_day != get_current_date():
            response.meta['need_break'] = True
        if self.full_dose and item_day < "2015-12-31":
            response.meta['need_break'] = True

    def parse_info(self, response):
        """Final detail-page callback.

        Copies ``response.meta`` entries plus spider attributes into the
        site-specific item class, located dynamically at
        ``commonresources.spider_items.<province>.<city>.items.<NameZh>Item``,
        falling back to ``BaseItem`` when the lookup fails.
        """
        item_path = [
            'commonresources', "spider_items",
            pinyin(self.province), pinyin(self.city),
            'items',  # fixed: module name, not the file name 'items.py'
            pinyin(self.name_zh) + "Item",
        ]
        item_path = [part for part in item_path if part != ""]
        try:
            full_class_name = ".".join(item_path)
            # pydoc.locate returns None when the dotted path does not resolve.
            item_cls = pydoc.locate(full_class_name)
            item = item_cls() if item_cls else BaseItem()
        except Exception:
            item = BaseItem()

        # Copy only the meta keys that the item class actually declares.
        for key, value in response.meta.items():
            if key in item.fields:
                item[key] = value

        item['source_type'] = self.name_zh
        item['province'] = self.province
        item['city'] = self.city
        item['is_parsed'] = 0
        item['html'] = response.text
        item['origin_url'] = response.url
        yield item

    def parse_item_new(self, response):
        """Generic final callback for newly developed spiders.

        Usage contract:
        1) response.meta['item'] carries the pre-built item instance;
        2) do not pass unrelated data through to the final meta;
        3) scrapy's own meta keys (depth / download_timeout / download_slot /
           download_latency / proxy / retry_times) are skipped;
        4) for pdf/jpg responses, set is_pdf=1 / is_jpg=1 in the parent
           request's meta;
        5) for multi-level pages, store the previous page in the 'html' field.
        """
        item = response.meta['item']
        skip_keys = ('depth', 'download_timeout', 'download_slot',
                     'download_latency', 'proxy', 'retry_times',
                     'item', "is_pdf", "is_jpg")
        for key in response.meta:
            try:
                if key not in skip_keys:
                    item[key] = response.meta[key]
            except Exception as e:
                # Unknown field on the item class: record it for follow-up.
                # NOTE(review): mode "w+" keeps only the last entry; switch to
                # append mode if a full history is wanted.
                with open('./import.log', "w+", encoding='utf-8') as f:
                    f.write(key + str(e))
                print(f"这是新的字段:{e},请予以关注，添加或者忽略")

        if "file_type" in response.meta:  # one of: pdf / zip / doc / jpg
            # Skip bodies near the 16 MB document size limit.
            if len(str(response.body)) < 16000000:  # 16793598:
                item['file_type'] = response.meta["file_type"]
                item['file_content'] = response.body
        else:
            item['html'] = response.text

        # Backwards compatibility with spiders written before 'file_type'.
        if "file_type" not in response.meta:
            if "is_pdf" in response.meta:
                if len(str(response.body)) < 16000000:  # 16793598:
                    item['is_pdf'] = 1
                    item['pdf_content'] = response.body
            elif "is_jpg" in response.meta:
                item['is_jpg'] = 1
                # Fixed: scrapy responses expose .body, not .content
                # (.content is the requests-library API and raised
                # AttributeError here).
                item['jpg_content'] = response.body
            else:
                item['html'] = response.text

        if "is_parsed" not in item:
            item['is_parsed'] = 0
        item['source_type'] = self.name_zh
        item['province'] = self.province
        if 'project_city' in item:
            item['project_city'] = check_city_field(item['project_city'])
            if not item['project_city']:
                del item['project_city']
        yield item

    def parse_item(self, response):
        """DEPRECATED: kept only for spiders developed before 2020-11-30.
        Use ``parse_item_new`` for new spiders.

        Handles three response shapes: a bare pdf body, a parent html page
        containing both content and a pdf link, or a plain content page.
        """
        item = response.meta['item']
        if "html" not in response.meta:
            try:
                item['html'] = response.text
            except Exception:
                # Binary responses (e.g. pdf) have no decodable text.
                item['html'] = None
        if "p_html" in response.meta:  # bid-opening notice pages (Zhejiang)
            item['html'] = "开标公示信息：1》》》开标成员信息：" + response.text + \
                           "2》》》前一网页信息：" + response.meta['p_html']
        if "is_pdf" in response.meta and response.meta['is_pdf']:
            item['is_pdf'] = response.meta['is_pdf']
            item['pdf_content'] = response.body

        item["announcement_title"] = response.meta['announcement_title']
        item["release_time"] = response.meta["release_time"]
        for key in ("project_area", "project_type", "info_type"):
            if key in response.meta:
                item[key] = response.meta[key]
        item['origin_url'] = response.meta['origin_url']
        if "construction_type" in response.meta:
            item['announcement_type'] = response.meta['construction_type']
        if "announcement_type" in response.meta:
            item['announcement_type'] = response.meta['announcement_type']
        # Yield only when some usable payload (html or pdf) was captured.
        if ("html" in item and item['html']) or \
                ("pdf_content" in item and item['pdf_content']):
            item['is_parsed'] = 0
            item['source_type'] = self.name_zh
            item['province'] = self.province
            if 'project_city' in response.meta:
                # NOTE(review): guards on 'project_city' but reads
                # meta['city'] — confirm this asymmetry is intentional.
                item['project_city'] = response.meta['city']
            yield item
