"""
微信公众号保存到本地docx
"""
import os
import re

import cv2
import html2text
import numpy as np
import pypandoc
import requests
from bs4 import BeautifulSoup
from lxml import html


def html_save_docx(h1_title, body_content, image_folder="images", save_docx_folder="docx"):
    """
    Render an HTML article body into a local .docx file.

    The HTML is converted to markdown, every remote image referenced in the
    markdown is downloaded into ``image_folder`` (and its URL rewritten to
    the local copy), and pandoc then renders the result into
    ``save_docx_folder``.

    :param h1_title: article title; used as the <h1> heading and, after
        sanitization, as the .docx file name.
    :param body_content: HTML source of the article body.
    :param image_folder: directory where downloaded images are saved.
    :param save_docx_folder: directory where the .docx file is written.
    :return: None
    """
    # Make sure both output locations exist before any file is written.
    os.makedirs(image_folder, exist_ok=True)
    os.makedirs(save_docx_folder, exist_ok=True)

    def save_image_bytes(content, output_path):
        """Identify image bytes by magic number and persist them.

        Returns the path the file was saved under, or False when the
        payload is not a recognized (or decodable) image.
        """
        if content.startswith(b"\x47\x49\x46\x38"):  # "GIF8"
            # cv2 cannot round-trip (animated) GIFs, so keep the raw bytes.
            gif_path = f'{output_path}.gif'
            with open(gif_path, 'wb') as f:
                f.write(content)
            return gif_path
        # Magic-number prefix -> extension used for the saved copy.
        # "RIFF" is the WebP/RIFF container (not a JPEG marker); it is
        # re-encoded to JPEG because docx/pandoc handle JPEG reliably.
        for magic, ext in (
            (b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A", ".png"),  # PNG
            (b"RIFF", ".jpg"),                              # WebP container
            (b"\xFF\xD8", ".jpg"),                          # JPEG
        ):
            if content.startswith(magic):
                buffer = np.frombuffer(content, dtype="uint8")
                image = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
                if image is None:
                    # Corrupt or unsupported payload — do not write a file.
                    return False
                out_path = f'{output_path}{ext}'
                cv2.imwrite(out_path, image)
                return out_path
        return False

    def download_image(url, output_path):
        """Fetch one image URL; return the local path or False on failure."""
        try:
            # Timeout so a dead CDN link cannot hang the whole conversion.
            response = requests.get(url, timeout=30)
        except requests.RequestException:
            # Best effort: a failed download simply keeps the remote URL.
            return False
        return save_image_bytes(response.content, output_path)

    def convert_markdown_images(text, image_folder):
        """Download every markdown image and point its link at the local file."""
        # Non-greedy alt text so several images on one line are each
        # matched (greedy ".*" would swallow all but the last URL);
        # plain http is accepted alongside https.
        image_pattern = re.compile(r'!\[.*?\]\((https?.*?)\)')
        for num, url in enumerate(image_pattern.findall(text)):
            local_path = download_image(url, f"{image_folder}/{num}")
            if local_path is False:
                continue  # keep the remote URL if the download failed
            text = text.replace(url, local_path)
        return text

    html_content = (
        f"<html><head><meta charset='UTF-8'><title>{h1_title}</title></head>"
        f"<body><h1>{h1_title}</h1>{body_content}</body></html>"
    )
    # bodywidth=0 disables html2text's line wrapping so image URLs are
    # never split across lines (which would break the regex above).
    markdown_content = html2text.html2text(html_content, bodywidth=0)
    converted_text = convert_markdown_images(markdown_content, image_folder)

    # Keep only CJK ideographs, ASCII letters/digits and whitespace, then
    # drop the remaining whitespace, yielding a filesystem-safe name.
    save_title = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9\s]', '', h1_title) + '.docx'
    save_title = save_title.replace('\n', '').replace('\t', '').replace(' ', '')
    pypandoc.convert_text(converted_text, 'docx', format='markdown',
                          outputfile=os.path.join(save_docx_folder, save_title))



url = "https://mp.weixin.qq.com/s?__biz=Mzg3NDcwNjIwOQ==&mid=2247548819&idx=1&sn=b17debaf4d668c5fa09f9b3eb1b60069&chksm=cf1675fe35b4844dbdff149991b09a2101a238118045a4c7cc7d67adf7ca82c1515001f6afe8&xtrack=1&scene=90&subscene=93&sessionid=1721976464&flutter_pos=1&clicktime=1721976498&enterid=1721976498&finder_biz_enter_id=4&ranksessionid=1721976452&ascene=56&fasttmpl_type=0&fasttmpl_fullversion=7309920-zh_CN-zip&fasttmpl_flag=0&realreporttime=1721976498848&devicetype=android-34&version=28003282&nettype=WIFI&abtest_cookie=AAACAA%3D%3D&lang=zh_CN&session_us=gh_1b8a6e1dcd6f&countrycode=CN&exportkey=n_ChQIAhIQ4ANPrgfi6It0jhIaOWMkCRLoAQIE97dBBAEAAAAAACy7LRsAtz0AAAAOpnltbLcz9gKNyK89dVj06Gw8z%2B52kQIwVMItjkjndkw6nZlNKTDBVF7KX2Qe2lwpslbqb0W0Q14ZYgxObVL1xpZQ93stwuFtjTqsVFLZjy2V%2Bb%2BtMXmB90cUXDxYfTU3lUqX48LOJGQMDveSP8XkBt847eWdF0sTtKw%2Bg7%2FFF%2BgXYXRY%2BgFeiptyrfJS5S%2FR61cJvulg9ohJmtYE2u7UXLlmJgBxWE1HzcomV6tGZsP0r1HOcbln1ns2wPUoNKl4NLgxaHnaKc6wltfvc64AsyY%3D&pass_ticket=tdmkGwvzaGrWdd4HEiTOueHYl02pqrNmJoJat1YBl0q2KSendLhzob69tGI%2BMgB5&wx_header=3"
req=requests.get(url)





tree = html.fromstring(req.content)

elements = tree.xpath('//div[contains(@id, "js_content")]')
elements_title = tree.xpath('//h1[contains(@class, "rich_media_title")]')



html_content = html.tostring(elements[0], pretty_print=True, encoding='unicode')

soup = BeautifulSoup(html_content, 'html.parser')
# 遍历每个<img>标签
for img_tag in soup.find_all('img'):
    # 检查该<img>标签是否有data-src属性
    if img_tag.has_attr('data-src'):
        # 将data-src属性改为sec
        img_tag['src'] = img_tag['data-src']
        del img_tag['data-src']  # 删除原来的data-src属性

html_content = str(soup.prettify())

html_save_docx(h1_title=elements_title[0].text, body_content=html_content, image_folder="images",save_docx_folder="docx")
