#coding=utf-8
#author:yan_严聪秦(qq:1727053412)
#动脑学院vip学员
# 爬取
# url = 'http://tieba.baidu.com/p/2460150866'
# 前三页所有的图片
# import requests
# from bs4 import BeautifulSoup
# import os
# from urllib.request import urlretrieve
# import re
# def get_html(url):
#     try:
#         r=requests.get(url)
#         html=r.text
#         return html
#     except Exception as e:
#         print(e)
#
# def get_pic(html,page):
#     soup=r'src="(.+?\.jpg)" pic_ext'  # note: space before pic_ext is required to match the page HTML
#     rule = re.compile(soup)
#     img_list=re.findall(rule,html)
#     # soup=BeautifulSoup(html,'lxml')
#     # img_list=soup.find_all('img',class_='BDE_Image')
#     n=1
#     for img in img_list:
#         # re.findall with one capture group yields plain strings,
#         # so each match IS the URL — .get('src') only applies to the
#         # BeautifulSoup tag variant above.
#         url=img
#         name='./image/第%d页-%d.jpg'%(page,n)
#         urlretrieve(url,name)
#         n += 1
# def get_all_image():
#     try:
#         # ensure the output directory exists before urlretrieve writes into it
#         os.makedirs('./image', exist_ok=True)
#         for page in range(1,4):
#             url=base_url+'?pn=%d'%page
#             print(url)
#             html=get_html(url)
#             get_pic(html,page)
#
#     except Exception as e:
#         print(e)
#
# if __name__=='__main__':
#     base_url='http://tieba.baidu.com/p/2460150866'  # must match the target thread stated at the top of the file
#     get_all_image()








