import urllib.request
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
import threading

def imageSpider(url):
    """Fetch *url*, collect every <img> tag, and download each distinct
    image on its own worker thread.

    Relies on the module-level ``ua`` request headers, the global
    ``count`` image counter, and the ``download`` worker function.
    Returns only after every started download thread has finished.
    """
    global count
    # Import locally: the script's top level rebinds the name ``threading``
    # to a list, which previously made ``threading.Thread`` raise here.
    import threading
    import urllib.parse

    seen = set()     # pic URLs already scheduled (the original list was never filled)
    threads = []     # started workers, joined at the end
    req = urllib.request.Request(url, headers=ua)
    resp = urllib.request.urlopen(req)
    data = resp.read()
    # The target site may serve utf-8 or gbk; let UnicodeDammit detect it.
    dammit = UnicodeDammit(data, ["utf-8", "gbk"])
    soup = BeautifulSoup(dammit.unicode_markup, "lxml")
    # NOTE: the original wrote ``for images in images`` and shadowed the
    # result list with the loop variable.
    for img in soup.select("img"):
        try:
            src = img["src"]
            # ``urljoin`` lives in urllib.parse; urllib.request only
            # exposes it as an implementation accident.
            pic_url = urllib.parse.urljoin(url, src)
            if pic_url in seen:
                continue
            seen.add(pic_url)
            print(pic_url)
            count = count + 1
            # Downloading is slow network I/O, so hand it to a thread.
            t = threading.Thread(target=download, args=(pic_url, count))
            t.daemon = False  # non-daemon: process waits for downloads (setDaemon is deprecated)
            t.start()
            threads.append(t)
        except Exception as err:
            print(err)
    # Wait so callers see all download attempts completed on return.
    for t in threads:
        t.join()

def download(url, count):
    """Download one image from *url* and save it as ``images\\<count><ext>``.

    The extension is copied from the URL when its last four characters
    look like ``.xxx``; otherwise ``.png`` is assumed.  All errors are
    printed and swallowed (best-effort, matching the spider's design).
    """
    try:
        # Bug fix: the original wrote ``url[len(url)-4=="."]``, which
        # compares an int to "." (always False) and indexes url[0] — so
        # the ``.png`` fallback branch was unreachable.
        if len(url) >= 4 and url[-4] == ".":
            ext = url[-4:]
        else:
            ext = ".png"
        req = urllib.request.Request(url, headers=ua)
        resp = urllib.request.urlopen(req, timeout=200)
        data = resp.read()
        # ``with`` guarantees the file handle is closed even on error.
        with open("images\\" + str(count) + ext, "wb") as file_obj:
            file_obj.write(data)
        print("download" + str(count) + ext)
    except Exception as err:
        print(err)

# Entry page the spider crawls for <img> tags.
start_url="https://www.xiaohongshu.com/"

# Request headers sent with every fetch: a mobile Chrome User-Agent so the
# site serves a regular page instead of blocking the default urllib UA.
ua={
    'User-Agent':' Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Mobile Safari/537.36'
}


if __name__ == '__main__':
    # Shared image counter, incremented by imageSpider via ``global count``.
    count = 0
    # Bug fix: the original also bound ``threading = []`` here, rebinding
    # the stdlib ``threading`` module to a list at module scope and making
    # ``threading.Thread`` fail inside imageSpider.  Removed.
    imageSpider(start_url)