import time
import urllib
import urllib.parse
from urllib import request
# Desktop-browser User-Agent so Baidu serves the normal page instead of
# rejecting the default urllib client signature.
headers={
     'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
}
# URL pattern — the pn query parameter advances in steps of 50 per page:
# https://tieba.baidu.com/f?kw=python&ie=utf-8&pn=0    page 1
# https://tieba.baidu.com/f?kw=python&ie=utf-8&pn=50   page 2
# https://tieba.baidu.com/f?kw=python&ie=utf-8&pn=100  page 3
# https://tieba.baidu.com/f?kw=python&ie=utf-8&pn=150  page 4

# Earlier scratch version of the URL construction, kept for reference:
#for i in range(0,4):
    #a='https://tieba.baidu.com/f?kw=python&ie=utf-8&pn='
    #b=str(i*50)  # str() converts the page offset to a string
    #c=a+b

def loadpage(fullurl, filename):
    """Download one page and return its raw body bytes.

    Args:
        fullurl: complete request URL (base URL plus the &pn= page offset).
        filename: local file name — used here only for progress output.

    Returns:
        The HTTP response body as bytes.
    """
    print("正在下载：", filename)
    req = request.Request(fullurl, headers=headers)
    # Use the response as a context manager so the connection is always
    # closed — the original leaked the response object.
    with request.urlopen(req) as resp:
        return resp.read()


def writepage(html, filename):
    """Save raw page bytes to a local file.

    Args:
        html: page content as bytes.
        filename: destination file path.
    """
    print("正在保存：", filename)
    with open(filename, "wb") as out:
        out.write(html)
    # Visual separator between pages in the console output.
    print("____________________________________________________")

#构造URL
def tiebaSpider(url, begin, end):
    """Crawl pages begin..end (inclusive) of a tieba and save each locally.

    Args:
        url: base URL already carrying the urlencoded 'kw' parameter.
        begin: first page number (1-based).
        end: last page number, inclusive.
    """
    for page in range(begin, end + 1):
        offset = (page - 1) * 50             # Baidu paginates in steps of 50
        fullurl = url + "&pn=" + str(offset)  # complete URL for this page
        filename = "D:/第" + str(page) + "页.html"  # local save path
        # Fetch the page, then persist it to disk.
        writepage(loadpage(fullurl, filename), filename)
if __name__ == "__main__":
    kw = input("请输入贴吧名:")
    begin = int(input("请输入起始页："))
    end = int(input("请输入结束页："))
    url = 'https://tieba.baidu.com/f?'
    # urlencode percent-encodes the (possibly non-ASCII) tieba name.
    # Requires the explicit `import urllib.parse` at the top of the file —
    # the original relied on urllib.request loading it as a side effect.
    key = urllib.parse.urlencode({'kw': kw})
    url = url + key
    tiebaSpider(url, begin, end)
    time.sleep(10)  # pause so the console output stays visible before exit

