import os
import threading
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
from bs4 import UnicodeDammit

def imageSpider(url):
    """Fetch *url*, collect every ``<img>`` tag, and download each unique
    image on its own thread via :func:`download`.

    Side effects: increments the global ``count`` once per scheduled image
    and appends every started thread to the global ``threads`` list so the
    caller can join them.
    """
    global threads
    global count
    try:
        seen = []  # absolute image URLs already scheduled (skip duplicates)
        req = urllib.request.Request(url, headers=ua)
        data = urllib.request.urlopen(req)
        data = data.read()
        # Guess the page encoding; these pages may be utf-8 or gbk.
        dammit = UnicodeDammit(data, ["utf-8", "gbk"])
        data = dammit.unicode_markup
        soup = BeautifulSoup(data, "lxml")
        images = soup.select("img")
        for image in images:
            try:
                src = image["src"]
                # BUG FIX: urljoin is in urllib.parse, not urllib.request —
                # the original raised AttributeError on every image.
                pic_url = urllib.parse.urljoin(url, src)
                if pic_url not in seen:
                    # BUG FIX: record the URL so the duplicate check works;
                    # the original never appended to its list.
                    seen.append(pic_url)
                    print(pic_url)
                    count = count + 1
                    T = threading.Thread(target=download, args=(pic_url, count))
                    # setDaemon() is deprecated (Py 3.10+); set the attribute.
                    T.daemon = False
                    T.start()
                    threads.append(T)
            except Exception as err:
                print(err)
    except Exception as err:
        print(err)
def download(url, count):
    """Download *url* and save it as ``images/<count><ext>``.

    The extension is kept from the URL when its last four characters look
    like ``.xxx``; otherwise ``.png`` is assumed. Errors are printed, not
    raised, so one failed image does not kill the crawl.
    """
    try:
        # Keep a 3-character extension from the URL, else default to .png.
        # Length guard fixes an IndexError on URLs shorter than 4 chars.
        if len(url) >= 4 and url[-4] == ".":
            ext = url[-4:]
        else:
            ext = ".png"
        req = urllib.request.Request(url, headers=ua)
        data = urllib.request.urlopen(req, timeout=100)
        data = data.read()
        # Ensure the target directory exists — the original crashed with
        # FileNotFoundError unless "images" was created by hand.
        os.makedirs("images", exist_ok=True)
        # os.path.join is portable (the "images\\" literal only worked on
        # Windows); the context manager closes the file even on error.
        with open(os.path.join("images", str(count) + ext), "wb") as file_obj:
            file_obj.write(data)
        print("download" + str(count) + ext)
    except Exception as err:
        print(err)


# Entry page to crawl for <img> tags.
start_url = "http://movie.douban.com/"

# Request headers sent with every fetch: a desktop Chrome User-Agent so
# the site serves us a normal page instead of rejecting the default
# urllib agent string.
ua = {
 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}


if __name__ == '__main__':
    count = 0     # running image index, shared with imageSpider/download
    threads = []  # worker threads started by imageSpider
    imageSpider(start_url)
    # Explicitly wait for every download thread — the list was collected
    # for exactly this purpose but was never joined in the original.
    for t in threads:
        t.join()