#coding:utf-8
from bs4 import BeautifulSoup  # scrape data out of fetched HTML pages
import urllib2,urllib
# Global counter giving each downloaded image a unique filename (image/<a>.jpg).
a = 0


def crawl(url):
    """Download every <img> found on *url* into the local image/ directory.

    Sends a desktop-browser User-Agent header so the site's anti-crawler
    check does not reject the request.  Increments the module-level
    counter ``a`` once per saved image.
    """
    global a
    # Disguise as a regular browser -- the site blocks default urllib agents.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
    # Build a request object carrying the spoofed headers.
    req = urllib2.Request(url, headers=headers)
    # 20 s timeout so one stalled connection cannot hang the whole run.
    page = urllib2.urlopen(req, timeout=20)
    try:
        contents = page.read()
    finally:
        # Close the response explicitly -- the original leaked the socket.
        page.close()
    soup = BeautifulSoup(contents, 'html.parser')
    for img in soup.find_all('img'):
        link = img.get('src')
        # Some <img> tags have no src attribute; urlretrieve(None) would raise.
        if not link:
            continue
        urllib.urlretrieve(link, 'image/%s.jpg' % a)
        a += 1


# Walk listing pages 1-9 and download the images on each.
# (crawl is now defined once, not redefined on every loop iteration.)
for x in xrange(1, 10):
    crawl('http://www.dbmeinv.com/?pager_offset=%d' % x)




















# html = '<a class="a" href="http://www.baidu.com"></a><a class="a" href="http://www.4399.com"></a>'
# soup = BeautifulSoup(html,'html.parser')
# a = soup.find_all('a',class_="a")
# for b in a:
#     link = b.get('href')
#     print link

# html =  '<div class="a">可里老师</div><div class="b">锁微老师</div>'
# soup = BeautifulSoup(html,'html.parser')
# e = soup.find('div',class_="b").text
# print e

# # 创建字符串
# html = ''  
# soup = BeautifulSoup(open('a.html'),'html.parser')
# # 打印本地文件的内容
# print soup.prettify()


# html = '<title>同学们真棒</title>'
#  # 创建一个对象，解析网页方式
# soup = BeautifulSoup(html,'html.parser')
# print soup.title