# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import base64
import hashlib
import io
import random
import re
import warnings
from urllib.parse import urljoin

import oss2
import requests
import scrapy
from PIL import Image
from bs4 import BeautifulSoup, Comment
from faker import Faker

from components.config import OSS_WFQ_GOV_FILE
from components.custom_item import UpdateCustomItem
from utils.tools import get_md5

# Suppress all warnings module-wide — primarily urllib3's InsecureRequestWarning
# triggered by the verify=False requests calls in NewsItemMixin.upload_image.
warnings.filterwarnings("ignore")
# Module-level OSS bucket client shared by NewsItemMixin.upload_image for
# storing mirrored news images; credentials/endpoint come from project config.
bucket = oss2.Bucket(
    oss2.Auth(OSS_WFQ_GOV_FILE["access_key_id"], OSS_WFQ_GOV_FILE["access_key_secret"]),
    OSS_WFQ_GOV_FILE["endpoint"],
    OSS_WFQ_GOV_FILE["bucket_name"],
)


class NewsItemMixin:
    """Shared helpers for news items: mirror article images into OSS and
    rewrite the article HTML to point at the mirrored copies."""

    def fix_body_html(self, body_html, publish_time):
        """Clean article HTML and re-point embedded images to their OSS mirrors.

        Removes ``<input>`` tags and HTML comments, then replaces every
        ``<img src>`` with the OSS URL returned by :meth:`upload_image`.

        :param body_html: raw article HTML string
        :param publish_time: publish date used to build the OSS path prefix
        :return: the cleaned HTML as a string
        """
        soup = BeautifulSoup(body_html, "html.parser")

        # Form inputs are noise inside an article body — drop them entirely.
        for tag in soup.find_all("input"):
            tag.decompose()

        # Strip HTML comments. bs4's modern keyword is `string=`; the old
        # `text=` alias is deprecated.
        for tag in soup.find_all(string=lambda text: isinstance(text, Comment)):
            tag.extract()

        # Mirror each image and rewrite its src to the OSS URL.
        for img in soup.find_all("img"):
            img_src = img.get("src")
            if not img_src:
                # An <img> without src would crash upload_image (None.encode);
                # nothing to mirror, so skip it.
                continue
            img["src"] = self.upload_image(image_url=img_src, publish_time=publish_time, content=True)
        return str(soup)

    @staticmethod
    def is_image_corrupted(img_data):
        """Return True when ``img_data`` decodes as a valid image, False otherwise.

        NOTE(review): the name is inverted — it returns True for a *healthy*
        image. Existing callers compare against False, so the contract is kept
        unchanged and only documented here; renaming would break them.

        :param img_data: raw image bytes
        """
        try:
            Image.open(io.BytesIO(img_data)).verify()
            return True
        except Exception:  # PIL raises several exception types on bad data
            return False

    def upload_image(self, image_url, publish_time, content=False):
        """Mirror ``image_url`` into OSS and return the public OSS URL.

        Handles http(s) URLs, site-relative URLs (resolved against ``self.url``)
        and inline ``data:...;base64,`` payloads. Uploads are retried up to
        three times; afterwards the stored object is re-downloaded and checked
        to actually decode as an image.

        :param image_url: original image URL or base64 data URI
        :param publish_time: date value; its first 10 chars form the path prefix
        :param content: True for in-article images (stored under ``.../content/``)
        :return: the OSS URL, or None when the stored object is not a valid image
        """
        file_name_hash = hashlib.md5(image_url.encode("utf-8")).hexdigest()
        # Derive the extension from the path part (before any query string).
        # Fall back to "jpg" when there is no dot at all — the old code would
        # otherwise use the entire path as the "extension".
        path_part = image_url.split("?")[0]
        file_type = path_part.split(".")[-1] if "." in path_part else "jpg"
        if "base64," in image_url:
            file_type = "jpg"
        if "http" not in image_url:
            # Site-relative link: resolve against the page URL.
            image_url = urljoin(self.url, image_url)
        publish_time = f"{publish_time}"[:10].strip()
        data_path = f"industry_news/{publish_time}/"
        if content:
            data_path += "content/"
        data_path += f"{file_name_hash}.{file_type}"
        new_url = "https://wfq-gov-file.oss-cn-hangzhou.aliyuncs.com/" + data_path
        if not bucket.object_exists(data_path):
            headers = {"user-agent": Faker().chrome()}
            for _ in range(3):
                try:
                    if "base64," in image_url:
                        bucket.put_object(data_path, base64.b64decode(image_url.split("base64,")[-1]))
                    else:
                        resp = requests.get(image_url, headers=headers, timeout=5, verify=False)
                        # Previously the Response object itself was uploaded with
                        # no status check, so 404/500 error pages were stored as
                        # "images". Fail the attempt instead and upload the bytes.
                        resp.raise_for_status()
                        bucket.put_object(data_path, resp.content)
                    print("upload image to oss success", image_url, new_url)
                    break
                except Exception as e:
                    print("upload image to oss error", image_url, new_url, e)
            # Verify the freshly stored object decodes as an image; bail out
            # with None so callers can drop the broken entry.
            if self.is_image_corrupted(requests.get(new_url, timeout=5, verify=False).content) is False:
                return None
        return new_url


class NetIndustryNewsItem(UpdateCustomItem, NewsItemMixin):
    """Industry-news item; article/list images are mirrored to OSS before persisting."""

    # Fields rewritten on update. The original list contained "body_html"
    # twice — the duplicate was redundant and has been removed.
    __update_key__ = ["body_html", "tags"]

    title = scrapy.Field()
    publish_time = scrapy.Field()
    body_html = scrapy.Field()
    tags = scrapy.Field()
    source = scrapy.Field()
    url = scrapy.Field()
    image_list = scrapy.Field()
    document_id = scrapy.Field()

    def pre_to_db(self):
        """Normalize fields before persisting: collapse whitespace out of the
        title, derive a stable document id from title+publish_time, and mirror
        all images (body and list) to OSS."""
        self.title = re.sub(r"\s+", "", self.title)
        self.document_id = get_md5(self.title + self.publish_time)

        self.body_html = {"content": self.fix_body_html(self.body_html, self.publish_time)}
        # Upload each list image; upload_image returns None on failure, and
        # those entries are dropped.
        uploaded = (self.upload_image(image, self.publish_time) for image in self.image_list or [])
        self.image_list = [u for u in uploaded if u]


class NetTaxNewsItem(UpdateCustomItem, NewsItemMixin):
    """Tax-news item; article/list images are mirrored to OSS before persisting."""

    # Fields rewritten on update. The original list contained "body_html"
    # twice — the duplicate was redundant and has been removed.
    __update_key__ = ["body_html", "tags"]

    document_id = scrapy.Field()
    title = scrapy.Field()
    publish_time = scrapy.Field()
    body_html = scrapy.Field()
    tags = scrapy.Field()
    source = scrapy.Field()
    url = scrapy.Field()
    image_list = scrapy.Field()

    def pre_to_db(self):
        """Normalize fields before persisting: collapse whitespace out of the
        title, derive a stable document id from title+publish_time, and mirror
        all images (body and list) to OSS."""
        self.title = re.sub(r"\s+", "", self.title)
        self.document_id = get_md5(self.title + self.publish_time)

        self.body_html = {"content": self.fix_body_html(self.body_html, self.publish_time)}
        # Upload each list image; upload_image returns None on failure, and
        # those entries are dropped.
        uploaded = (self.upload_image(image, self.publish_time) for image in self.image_list or [])
        self.image_list = [u for u in uploaded if u]
