# coding:utf-8
# Funny (qq:516110288)


# Scrape all images from the first three pages of the thread:
# url = 'http://tieba.baidu.com/p/2460150866'
import os
import re
import time
from urllib.request import urlretrieve

import requests
from bs4 import BeautifulSoup
# Fetch the page HTML
def get_code(url):
    """Fetch *url* and return the response body as text, or None on failure.

    Any exception (connection error, timeout, ...) is printed and swallowed
    so the caller can simply skip the page.
    """
    try:
        # A timeout prevents the script from hanging forever on a dead server;
        # the original call had none.
        r = requests.get(url, timeout=10)
        return r.text
    except Exception as e:
        print(e)
        return None
# Extract image addresses from a thread page.

#class ="BDE_Image" src="https://imgsa.baidu.com/forum/w%3D580/sign=941c6a9596dda144da096cba82b6d009/e889d43f8794a4c2e5d529ad0ff41bd5ac6e3947.jpg" pic_ext="jpeg" height="350" width="560" >
#<img pic_type="0" class="BDE_Image" src="https://imgsa.baidu.com/forum/w%3D580/sign=941c6a9596dda144da096cba82b6d009/e889d43f8794a4c2e5d529ad0ff41bd5ac6e3947.jpg" pic_ext="jpeg" height="350" width="560">
def get_img(html, page):
    """Download every post image (class ``BDE_Image``) found in *html*.

    Files are written to ./image/ and named by page and ordinal:
    ``第{page}页-{n}.jpg``.
    """
    soup = BeautifulSoup(html, "lxml")
    # Bug fix: the original passed an undefined name `class_` positionally
    # (NameError). The CSS class filter must be the keyword class_=... .
    img_list = soup.find_all("img", class_="BDE_Image")
    # Ensure the target directory exists; urlretrieve does not create it.
    os.makedirs("./image", exist_ok=True)
    for n, img in enumerate(img_list, start=1):
        url = img.get("src")
        name = "./image/第%d页-%d.jpg" % (page, n)
        urlretrieve(url, name)
def get_all_img():
    """Download images from the first three pages of the thread.

    Reads the module-level ``base_url``; page URLs take the form
    ``base_url?pn=N`` for N in 1..3.
    """
    try:
        for page in range(1, 4):
            # Bug fixes: the original concatenated "pn=%d" with no "?" query
            # separator, and called undefined names get_html / get_pic
            # (with the typo `html.page` instead of `html, page`).
            url = base_url + "?pn=%d" % page
            print(url)
            html = get_code(url)
            if html is None:
                continue  # fetch failed; error already printed by get_code
            get_img(html, page)
    except Exception as e:
        print(e)
if __name__=="__main__":
    base_url="http://tieba.baidu.com/p/2460150866"
    get_all_img()
    print("处理完成")









# r=requests.get("http://www.baidu.com")
# print(r.status_code)  #响应成功
#
# print(r.raw)#返回原始响应体
# print(r.content)#响应数据（字节数据）
# #print()#响应数据（字符数据）
# print(r.headers)#响应头

#print(r.json())#requests内置解码器
#print(r.raise_for_status())#失败请求抛出异常


