# 批量爬取贴吧页面数据
from urllib import request as sss
from urllib import parse
from urllib.request import urlopen


def tiebaSpider(url,beginPage,endPage):
    """Crawl a contiguous range of Tieba result pages and save each one to disk.

    :param url: base url to crawl; a "&pn=<offset>" suffix is appended per page
    :param beginPage: first page number (1-based, inclusive)
    :param endPage: last page number (inclusive)
    :return: None
    """
    for page in range(beginPage, endPage + 1):
        # Tieba paginates in steps of 50 posts via the "pn" query parameter.
        offset = (page - 1) * 50
        target = "{}&pn={}".format(url, offset)
        out_name = "第" + str(page) + "页.html"
        # Fetch the page, then persist the response body locally.
        body = loadPage(target, out_name)
        writeFile(body, out_name)

def loadPage(url, filename):
    '''
    Fetch *url* and return the raw response body.

    :param url: url to download
    :param filename: display name used only for the progress message
    :return: response body as bytes
    '''
    print("正在下载" + filename)
    # Spoof a browser User-Agent so the server serves the normal page.
    # (Original string had an unbalanced "(" — closing paren restored.)
    headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;)"}
    request = sss.Request(url, headers=headers)
    # Context manager guarantees the connection is closed even on error
    # (the original leaked the response object).
    with urlopen(request) as response:
        return response.read()

def writeFile(html,filename):
    """
    Save a server response body to a local file.

    :param html: response body; ``bytes`` are written verbatim, ``str`` is
                 encoded as UTF-8 (the original wrote ``str(bytes)``, which
                 stored the literal ``b'...'`` repr and corrupted the HTML)
    :param filename: local file path to write
    :return: None
    """
    print( "正在存储"+filename )
    # Normalize to bytes so binary mode works for both input types.
    if isinstance(html, (bytes, bytearray)):
        data = bytes(html)
    else:
        data = str(html).encode("utf-8")
    # Binary mode avoids platform-dependent text encoding/newline mangling.
    with open(filename, "wb") as f:
        f.write(data)
    print("-"*20)


if __name__ == "__main__":
    kw = "lol" #input("请输入 需要爬去的 贴吧:")
    beginPage = 1 # int( input("起始页：") )
    endPage = 15 # int( input("结束页：") )
    url = "http://tieba.baidu.com/f?"
    key = parse.urlencode({"kw" : kw})
    print( key )

    # 组合后的url示例：http://tieba.baidu.com/f?kw=lol
    url = url + key
    tiebaSpider(url, beginPage, endPage)