import os
import re
import urllib.parse
import urllib.request

from colorama import Fore, Style, init

# colorama: automatically reset color/style after every print call.
init(autoreset=True)


# ------------------------------
# 1. 定义函数：下载网页
# ------------------------------
def get_html(url, timeout=15):
    """Fetch *url* and return its body decoded as UTF-8.

    Undecodable bytes are ignored rather than raising, since scraped
    pages are not guaranteed to be clean UTF-8.

    Args:
        url: Address of the page to download.
        timeout: Socket timeout in seconds (default 15) so a stalled
            server cannot hang the whole crawl.

    Returns:
        str: The decoded HTML text.
    """
    # Browser-like User-Agent: some servers reject urllib's default agent.
    headers = {"User-Agent": "Mozilla/5.0"}
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req, timeout=timeout) as response:
        return response.read().decode("utf-8", errors="ignore")


# ------------------------------
# 2. 定义函数：从HTML中提取jpg图片链接
# ------------------------------
def get_jpg_links(html, base_url):
    """Extract all .jpg image URLs from *html*, resolved against *base_url*.

    Improvements over a naive scrape: matches both single- and
    double-quoted ``src`` attributes, resolves relative paths with
    :func:`urllib.parse.urljoin` (which correctly handles ``./``, ``../``
    and absolute paths), and de-duplicates while preserving the order of
    first appearance.

    Args:
        html: Page source to scan.
        base_url: Address the page was fetched from; used to absolutize
            relative links.

    Returns:
        list[str]: Absolute .jpg URLs, first-seen order, no duplicates.
    """
    # Accept src="..." and src='...'; the .jpg extension matches
    # case-insensitively (e.g. photo.JPG).
    pattern = re.compile(r'src=["\']([^"\']+?\.jpg)["\']', re.IGNORECASE)
    links = pattern.findall(html)

    # urljoin leaves absolute URLs untouched and resolves relative ones
    # against the page address — safer than hand-built prefixing.
    resolved = (urllib.parse.urljoin(base_url, link) for link in links)

    # dict.fromkeys de-duplicates but, unlike set(), keeps insertion order.
    return list(dict.fromkeys(resolved))


# ------------------------------
# 3. 定义函数：下载图片
# ------------------------------
def download_images(links, folder="images"):
    """Download every URL in *links* into *folder* as ``img_<n>.jpg``.

    Best-effort batch: a failure on one URL is reported and skipped so
    the remaining images still download.

    Args:
        links: Iterable of image URLs.
        folder: Target directory, created if missing (default "images").
    """
    # exist_ok avoids the check-then-create race of os.path.exists().
    os.makedirs(folder, exist_ok=True)
    # Same browser-like UA as get_html(): urlretrieve would send urllib's
    # default agent, which the same server may reject.
    headers = {"User-Agent": "Mozilla/5.0"}
    for i, url in enumerate(links, start=1):
        filename = os.path.join(folder, f"img_{i}.jpg")
        try:
            req = urllib.request.Request(url, headers=headers)
            with urllib.request.urlopen(req) as resp, open(filename, "wb") as out:
                out.write(resp.read())
            # Report the actual saved path (the original message had lost
            # its placeholder).
            print(Fore.GREEN + f"下载成功: {filename}")
        except Exception as e:
            # Deliberately broad: any single bad URL must not abort the batch.
            print(Fore.RED + f"下载失败: {url} ({e})")


# ------------------------------
# 4. 主程序：多页面爬取
# ------------------------------
if __name__ == "__main__":
    # Listing pages of the campus-news photo section: the index page
    # plus five numbered continuation pages.
    pages = ["https://news.fzu.edu.cn/yxfd.htm"] + [
        f"https://news.fzu.edu.cn/yxfd/{n}.htm" for n in range(1, 6)
    ]

    collected = []
    for page in pages:
        print(f"\n正在爬取页面: {page}")
        page_links = get_jpg_links(get_html(page), page)
        print(f"  找到 {len(page_links)} 张图片")
        collected.extend(page_links)

    # Drop duplicates that appear on more than one listing page.
    collected = list(set(collected))

    print(f"\n共提取 {len(collected)} 张图片，开始下载...\n")
    download_images(collected)
    print("\n 所有图片下载完成！")