import requests
from bs4 import BeautifulSoup
import os
from datetime import datetime


def create_folder():
    """Create a folder named after the current timestamp and return its name.

    Returns:
        str: Folder name in ``YYYYMMDD_HHMMSS`` format (relative path).
    """
    folder_name = datetime.now().strftime("%Y%m%d_%H%M%S")
    # exist_ok=True avoids the check-then-create race of the
    # os.path.exists() + os.makedirs() two-step.
    os.makedirs(folder_name, exist_ok=True)
    return folder_name


def download_image(url, folder):
    """Download an image and save it into the given folder.

    Errors (network failures, timeouts) are logged and swallowed so one bad
    image does not abort a batch download.

    :param url: Image URL (string).
    :param folder: Path of the folder to save the image into.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    }
    try:
        response = requests.get(url, timeout=10, headers=headers)
        if response.status_code == 200:
            # Drop any query string so the name is a valid file name
            # (WeChat image URLs typically end with "...?wx_fmt=png").
            image_name = url.split("/")[-1].split("?")[0]
            if not image_name:
                # URL ended with "/" — fall back to a deterministic name.
                image_name = f"image_{abs(hash(url))}.jpg"
            image_path = os.path.join(folder, image_name)
            with open(image_path, 'wb') as f:
                f.write(response.content)
        else:
            # Previously a non-200 response was dropped silently.
            print(f"Skipped {url}: HTTP {response.status_code}")
    except Exception as e:
        print(f"Error downloading image {url}: {e}")


def save_text_to_file(text, title, folder):
    """Save an article's text to ``<folder>/<title>.txt`` (UTF-8).

    Characters that are illegal in file names are replaced with "_" so a
    title such as ``"a/b: c?"`` cannot escape *folder* or crash ``open()``.

    :param text: Article body to write.
    :param title: Article title, used as the file name stem.
    :param folder: Path of the folder to save the file into.
    """
    # The Windows-forbidden character set; "/" also covers POSIX separators.
    safe_title = "".join("_" if ch in '\\/:*?"<>|' else ch for ch in title)
    file_name = os.path.join(folder, f"{safe_title}.txt")
    with open(file_name, 'w', encoding='utf-8') as f:
        f.write(text)


def main():
    """Fetch a WeChat Official Account article page and download its images."""
    folder = create_folder()
    # URL of the target WeChat Official Account article.
    url = "https://mp.weixin.qq.com/s/aR2bOrDPQT_H9iVT9Uj41w"
    headers = {
        # 'referer': 'https://mp.weixin.qq.com',
        'cookie': 'pgv_pvid=6670082751; RK=WMxp6iw+3H; ptcz=831e2d5114bbf9b46ee7956fedb62717ee910417ecd992f3e0027f034213caf1; o_cookie=2925851543; pac_uid=1_2925851543; iip=0; tvfe_boss_uuid=94828b35f56c4131; LW_uid=01d6E8a1d0T8Y6S87134I123O2; eas_sid=J116c8t1G078b6f8N1u4m24059; LW_sid=6166y891k1d2s4h7v9M5A8K6e8; rewardsn=; wxtokenkey=777; wwapp.vid=; wwapp.cst=; wwapp.deviceid=',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.48'
    }
    try:
        response = requests.get(url, timeout=10, headers=headers)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            for img in soup.find_all('img'):
                # BUG FIX: the Tag object itself was passed to download_image.
                # WeChat lazy-loads images, so the real URL lives in
                # `data-src`; fall back to `src` for eagerly loaded ones.
                img_url = img.get('data-src') or img.get('src')
                if img_url:
                    print(img_url)
                    download_image(img_url, folder)
        else:
            # Previously a non-200 response was dropped silently.
            print(f"Failed to fetch page: HTTP {response.status_code}")
    except Exception as e:
        print(f"Error occurred: {e}")


# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()