import threading
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
from bs4 import UnicodeDammit

def imageSpider(url):
    """Fetch *url*, find every <img> tag, and start one download thread per
    unique image URL.

    Side effects: increments the global ``count`` (used to number saved
    files) and appends every started thread to the global ``threads`` list.
    Errors are printed rather than raised (best-effort crawl).
    """
    global threads
    global count
    try:
        seen = []  # image URLs already scheduled, so each is downloaded once
        req = urllib.request.Request(url, headers=ua)
        # Close the HTTP response deterministically instead of leaking it.
        with urllib.request.urlopen(req) as resp:
            data = resp.read()  # raw page bytes
        # The page may be UTF-8 or GBK encoded; let UnicodeDammit detect it.
        dammit = UnicodeDammit(data, ["utf-8", "gbk"])
        soup = BeautifulSoup(dammit.unicode_markup, 'lxml')
        for image in soup.select("img"):
            try:
                src = image["src"]
                # urljoin's documented home is urllib.parse; it resolves
                # relative src values against the page URL.
                pic_url = urllib.parse.urljoin(url, src)
                if pic_url not in seen:
                    # Bug fix: the original never recorded URLs, so the
                    # duplicate check was a permanent no-op.
                    seen.append(pic_url)
                    print(pic_url)
                    count = count + 1
                    # Downloading is slow I/O, so run it on its own thread.
                    T = threading.Thread(target=download, args=(pic_url, count))
                    # setDaemon() is deprecated; non-daemon so downloads
                    # are allowed to finish before interpreter exit.
                    T.daemon = False
                    T.start()
                    threads.append(T)
            except Exception as err:
                print(err)
    except Exception as err:
        print(err)
def download(url, count):
    """Download *url* and save it as images\\<count><ext>.

    The extension is kept from the URL when its 4th-from-last character is a
    dot (e.g. ".jpg"); otherwise ".png" is assumed.  The "images" folder must
    already exist.  Errors are printed rather than raised.
    """
    try:
        # Keep the URL's own extension only when it looks like ".xxx".
        if url[-4] == ".":
            ext = url[-4:]
        else:
            ext = ".png"
        req = urllib.request.Request(url, headers=ua)
        # Close both the HTTP response and the output file deterministically,
        # even if read/write fails (the original leaked both on error).
        with urllib.request.urlopen(req, timeout=100) as resp:
            data = resp.read()
        with open("images\\" + str(count) + ext, "wb") as file_obj:
            file_obj.write(data)
        print("download" + str(count) + ext)
    except Exception as err:
        print(err)



start_url = "https://movie.douban.com/"

# Browser-like User-Agent so the site does not reject the request outright.
ua = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}
if __name__ == '__main__':
    count = 0       # running number used to name the saved image files
    threads = []    # download threads started by imageSpider
    imageSpider(start_url)
    # The thread list was collected but never joined in the original;
    # wait explicitly so the script ends only after all downloads finish.
    for t in threads:
        t.join()

