import threading
import urllib.parse
import urllib.request
from pathlib import Path

from bs4 import BeautifulSoup
from bs4 import UnicodeDammit

start_url = 'http://p.weather.com.cn/zrds/index.shtml'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'
}

threads = []

# Request the target page and return its HTML source as a string
def requestPage(url):
    """Download *url* and return the response body decoded to text.

    UnicodeDammit guesses between utf-8 and gbk, the two encodings this
    site plausibly serves.  On a network error the exception is logged
    and re-raised — the original code swallowed it and then crashed with
    an UnboundLocalError on `data`, hiding the real cause.
    """
    try:
        req = urllib.request.Request(url, headers=headers)
        # Context manager guarantees the HTTP response is closed,
        # even if read() fails (the original leaked the connection).
        with urllib.request.urlopen(req) as resp:
            data = resp.read()
    except Exception as err:
        print(err)
        raise
    return UnicodeDammit(data, ['utf-8', 'gbk']).unicode_markup

# Parse the page with BeautifulSoup
def imageSpider(data):
    """Parse *data* (HTML text) and download every unique <img> on its
    own thread.

    Side effects: prints each image URL, starts one thread per image via
    ``download``, and appends each started thread to the module-level
    ``threads`` list.
    """
    seen = set()  # absolute URLs already scheduled — set gives O(1) lookup
    count = 0     # running image number, used as the saved file name
    soup = BeautifulSoup(data, "lxml")
    for image in soup.select('img'):
        try:
            src = image["src"]  # KeyError for <img> without src → logged below
            # urljoin's public home is urllib.parse; urllib.request only
            # re-exports it as an implementation detail.
            img_url = urllib.parse.urljoin(start_url, src)
            if img_url in seen:
                continue
            # BUG FIX: the original never recorded the URL, so its
            # "not in urls" duplicate check was a no-op and duplicate
            # images were downloaded again.
            seen.add(img_url)
            print(img_url)
            count += 1
            # Downloading is slow network I/O, so each image gets its
            # own thread instead of blocking the parse loop.
            t = threading.Thread(target=download, args=(img_url, count))
            t.daemon = False  # Thread.setDaemon() is deprecated since 3.10
            t.start()
            threads.append(t)
        except Exception as err:
            # One bad <img> tag must not abort the whole page.
            print(err)


def download(url, count):
    """Download *url* and save it as images/<count><ext>.

    The extension is kept from the URL when it ends in a 3-character one
    (".jpg", ".png", ...); otherwise ".png" is assumed.  Errors are
    logged and swallowed so one failed image does not kill its thread.
    """
    try:
        # Guard the length: the original indexed url[len(url)-4] and
        # raised IndexError on URLs shorter than 4 characters.
        if len(url) >= 4 and url[-4] == ".":
            ext = url[-4:]
        else:
            ext = '.png'
        req = urllib.request.Request(url, headers=headers)
        # Both the HTTP response and the output file are context-managed
        # so they are closed even when an exception fires mid-transfer
        # (the original leaked the file handle on a failed write).
        with urllib.request.urlopen(req, timeout=1000) as resp:
            data = resp.read()
        out_dir = Path("images")
        out_dir.mkdir(exist_ok=True)  # original crashed if the dir was missing
        # pathlib builds an OS-independent path; the original hard-coded
        # a Windows backslash ("images\\1.png") that breaks elsewhere.
        (out_dir / (str(count) + ext)).write_bytes(data)
        print("已下载图片：" + str(count) + ext)
    except Exception as err:
        print(err)

if __name__ == '__main__':
    # Fetch the index page, then spawn one download thread per image on it.
    imageSpider(requestPage(start_url))