import threading
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
from bs4 import UnicodeDammit
def imageSpider(url):
    """Fetch *url*, collect all ``<img src=...>`` links, and start one
    download thread per unique image URL.

    Relies on module-level globals:
      ua      -- request headers (spoofed User-Agent)
      count   -- running image counter, used to number saved files
      threads -- list that collects every spawned Thread object
    """
    global threads
    global count
    try:
        seen = []  # image URLs already scheduled (dedup list)
        req = urllib.request.Request(url, headers=ua)
        data = urllib.request.urlopen(req).read()
        # Let bs4 guess the encoding, trying utf-8 first.
        dammit = UnicodeDammit(data, ["utf-8", "abk"])
        soup = BeautifulSoup(dammit.unicode_markup, "lxml")
        for image in soup.select("img"):
            # Bug fix: skip <img> tags without a src attribute instead of
            # raising KeyError (which would abort the whole loop via the
            # enclosing try/except).
            src = image.get("src")
            if not src:
                continue
            pic_url = urllib.parse.urljoin(url, src)
            if pic_url not in seen:
                # Bug fix: record the URL so the dedup check actually works;
                # the original never appended, so duplicates were re-downloaded.
                seen.append(pic_url)
                print(pic_url)
                count = count + 1
                t = threading.Thread(target=download, args=(pic_url, count))
                t.daemon = False  # setDaemon() is deprecated since 3.10
                t.start()
                threads.append(t)
    except Exception as err:
        print(err)

def download(url, count):
    """Download one image from *url* and save it as images\\<count><ext>.

    The extension is taken from the URL only when it ends in a 3-letter
    extension (".xxx"); otherwise ".png" is assumed.  Errors are printed,
    not raised, so one failed image never kills its worker thread.
    """
    try:
        # Bug fix: guard the length so very short URLs (< 4 chars) don't
        # raise IndexError on the negative-offset lookup.
        if len(url) >= 4 and url[-4] == ".":
            ext = url[-4:]
        else:
            ext = ".png"
        req = urllib.request.Request(url, headers=ua)
        data = urllib.request.urlopen(req, timeout=100).read()
        # 'with' guarantees the handle is closed even if write() fails;
        # the original left the file open on a write error.
        with open("images\\" + str(count) + ext, "wb") as file_obj:
            file_obj.write(data)
        print("download" + str(count) + ext)
    except Exception as err:
        print(err)



# Page whose <img> tags will be scraped.
start_url="https://www.douban.com"

# Request headers: spoof a desktop Chrome User-Agent so the site serves
# the normal page instead of rejecting urllib's default agent string.
ua={
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}

if __name__ == '__main__':
    count = 0     # global image counter, incremented per scheduled image
    threads = []  # global list of worker threads started by imageSpider
    imageSpider(start_url)
    # Bug fix: the thread list was collected but never used — join each
    # worker so the main thread explicitly waits for all downloads.
    for t in threads:
        t.join()