# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import parsel
import scrapy
from bs4 import BeautifulSoup
from gne import GeneralNewsExtractor

from components.custom_item import UpdateCustomItem
from utils.tools import *


class NetTaxPolicyItem(UpdateCustomItem):
    """Scraped item for tax-policy documents collected from government sites.

    ``pre_to_db`` normalizes the raw scraped values (title, publish date,
    body HTML) and derives the two md5-based identifiers (``document_id``,
    ``uk_id``) right before the item is persisted.
    """

    __update_key__ = []

    document_id = scrapy.Field()  # policy id: md5(title + publish_date) after cleaning both; unique key
    publish_no = scrapy.Field()  # official document number
    title = scrapy.Field()  # title
    publish_date = scrapy.Field()  # publish date
    department = scrapy.Field()  # issuing department
    content = scrapy.Field()  # body (HTML)
    source_url = scrapy.Field()  # link to the original document
    source = scrapy.Field()  # source website
    province = scrapy.Field()  # province
    city = scrapy.Field()  # city
    county = scrapy.Field()  # county / district
    park = scrapy.Field()  # industrial park
    state = scrapy.Field()  # status
    uk_id = scrapy.Field()  # unique key: md5(title + publish_date + source + source_url) after cleaning

    def pre_to_db(self):
        """Clean and derive fields before the item is written to the DB.

        Steps, in order:
        1. Strip/de-blank the title and normalize the publish date to
           ``%Y-%m-%d``.
        2. Derive ``document_id`` from (title, publish_date).
        3. Try to recover a missing title from the raw HTML
           (``process_title``).
        4. If title or publish date is still missing, fall back to GNE
           auto-extraction on the content.
        5. Sanitize the content HTML and collapse redundant blanks.
        6. Derive ``uk_id`` from (title, publish_date, source, source_url).
        """
        self.title = self.delete_blank((self.title or '').strip())
        self.publish_date = format_date((self.publish_date or '').strip(), new_format='%Y-%m-%d')
        # document_id is computed BEFORE the GNE fallback fills in missing
        # values — it keys on the originally scraped title/date only.
        self.document_id = get_md5(self.title, self.publish_date)
        self.process_title()
        if not self.title or not self.publish_date:
            auto_parse_content_dict = self.parse_title_and_publish_time_by_gen(self.content)
            if not self.title:
                self.title = auto_parse_content_dict.get('title')
            if not self.publish_date:
                self.publish_date = auto_parse_content_dict.get('publish_time')
                self.publish_date = format_date((self.publish_date or '').strip(), new_format='%Y-%m-%d')
        self.content = self.process_content(self.content)
        self.content = del_redundant_blank_character(self.content)
        self.uk_id = get_md5(self.title, self.publish_date, self.source, self.source_url)

    def process_title(self):
        """Fill in a missing title from the raw content HTML.

        Tries a sequence of increasingly generic selectors (meta tag,
        known container, a "标题:" label in the text, then h2/h3/h1);
        each candidate is only consulted when ``self.title`` is still
        falsy, thanks to ``or`` short-circuiting.
        """
        response = parsel.Selector(self.content)
        self.title = self.title or response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        self.title = self.title or response.xpath("""string(//*[@id='contentDiv']/dl/dt)""").get()
        # BUG FIX: the pattern previously contained ``\\s`` inside a raw
        # string, i.e. the regex "literal backslash then s*", which could
        # never match ordinary page text; the intent (matching the other
        # ``\s*`` tokens in the same pattern) is optional whitespace.
        self.title = self.title or response.xpath(""".""").re_first(r"标\s*题\s*[:：]\s*([\w\W]{1,500}?\s+)")
        self.title = self.title or response.xpath("""string(//h2)""").get()
        self.title = self.title or response.xpath("""string(//h3)""").get()
        self.title = self.title or response.xpath("""string(//h1)""").get()

    @staticmethod
    def parse_title_and_publish_time_by_gen(html, with_body_html=False, **kwargs):
        """Auto-extract article metadata from raw HTML via GNE.

        Returns the GNE result dict, shaped like::

            {
              'title': title,
              'author': author,
              'publish_time': publish_time,
              'content': content[0][1]['text'],
              'images': content[0][1]['images'],
            }
        """
        extractor = GeneralNewsExtractor()
        return extractor.extract(html, with_body_html=with_body_html, **kwargs)

    @staticmethod
    def delete_blank(text):
        """HTML-unescape *text*, then strip zero-width spaces and ALL whitespace."""
        text = unescape(text)
        text = replace_str(text, r'\u200b', '')
        text = replace_str(text, r'\s+', '')
        return text

    @staticmethod
    def process_content(content):
        """Sanitize body HTML: drop comments/img/script/style, strip styling attrs.

        Keeps the document structure; only presentation-related attributes
        (style/class/id) are removed from the remaining tags.
        """
        # NOTE(review): unlike the 3-arg calls in delete_blank, this call
        # passes no replacement — presumably replace_str defaults it to ''
        # (i.e. HTML comments are deleted); confirm against utils.tools.
        content = replace_str(content, "<!--(.|\n)*?-->")
        soup = BeautifulSoup(content, "html.parser")
        for tag in soup.find_all():
            if tag.name in ["img", "script", "style"]:
                tag.decompose()
            else:
                if tag.attrs:
                    for j in ["style", "class", "id"]:
                        if j in tag.attrs:
                            del tag[j]
                if tag.name == "a":
                    if "href" in tag.attrs:
                        href = tag["href"]
                        # new_href = self.replace_oss_url(href)
                        new_href = href
                        tag["href"] = new_href
        content = soup.decode()
        return content
