'''
批量下载豆瓣首页的图片

采用伪装浏览器的方式爬取豆瓣网站首页的图片，保存到指定路径文件夹下
'''

# 导入所需的库
import urllib.request, socket, re, sys, os

# Default folder where downloaded images are saved (Windows-style path)
targetPath = "E:\\projects"


def saveFile(path, target_dir=None):
    """Map a URL to a local file path inside *target_dir*, creating the folder if needed.

    Args:
        path: URL of the resource; everything after the last ``'/'`` becomes
            the local file name.
        target_dir: Destination folder. Defaults to the module-level
            ``targetPath`` for backward compatibility with existing callers.

    Returns:
        Full local path under ``target_dir`` to save the resource to.

    Raises:
        ValueError: if *path* contains no ``'/'`` (same as the original).
    """
    if target_dir is None:
        target_dir = targetPath
    # makedirs(exist_ok=True) also creates intermediate directories and does
    # not crash when the folder already exists (the original os.mkdir would
    # fail on a nested path and was racy between the isdir check and mkdir).
    os.makedirs(target_dir, exist_ok=True)
    # File name = text after the last '/' of the URL.
    return os.path.join(target_dir, path[path.rindex('/') + 1:])


# NOTE(review): this is a flat script; an `if __name__ == '__main__':` guard
# would be ideal, but it is kept flat to preserve module-level behavior.

# Target page (earlier candidate URLs kept for reference)
#url = "https://www.douban.com/"
#url="https://www.douban.com/photos/album/51003610/"
#url="https://www.douban.com/photos/album/1648228355/"
url="http://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word=%E7%B9%81%E6%98%9F%E6%98%A5%E6%B0%B4"
# Spoof a desktop browser User-Agent so the site serves the normal page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '                            'Chrome/51.0.2704.63 Safari/537.36'
}

req = urllib.request.Request(url=url, headers=headers)

# Fetch and decode the page. The original used str(data), which regex-matched
# the bytes repr ("b'...'" with escape sequences) instead of the actual text;
# it also never closed the response, fixed here with a context manager.
with urllib.request.urlopen(req) as res:
    html = res.read().decode('utf-8', errors='ignore')

print()
print()
# BUG FIX: the original pattern used [^s] ("any char except the letter s"),
# which was clearly meant to be "any non-whitespace char" ([^\s]).
# set() deduplicates links; each match is a (full_url, extension) pair.
for link, t in set(re.findall(r'(https:[^\s]*?(jpg|png|gif))', html)):
    print(link)
    try:
        urllib.request.urlretrieve(link, saveFile(link))
    except Exception as e:  # narrowed from bare except; report why it failed
        print('失败', e)