#encoding=utf-8

import urllib
import urllib2
import sys
import os

def main():
    """Entry point: ask for a forum name and a page range, then crawl it."""
    forum = raw_input("请输入需要爬取的贴吧：")
    # Page bounds arrive as text; convert them to ints before looping.
    first = int(raw_input("请输入起始页:"))
    last = int(raw_input("请输入终止页："))

    # Url-encode the keyword and append it to the base search url.
    query = urllib.urlencode({"kw": forum})
    base_url = "http://tieba.baidu.com/f?" + query

    tiebaSpider(base_url, first, last)

def tiebaSpider(url, beginPage, endPage):
    """
    Dispatch one download request per page of the forum.

    :param url: base search url (already carries the kw query parameter)
    :param beginPage: first page to crawl (1-based, inclusive)
    :param endPage: last page to crawl (inclusive)
    :return: None
    """
    for page in range(beginPage, endPage + 1):
        # Tieba paginates with pn = 50 * (page - 1): 50 posts per page.
        pn = (page - 1) * 50
        # str(page), not bytes(page): explicit, intention-revealing int→text.
        filename = '第' + str(page) + "页.html"

        # BUG FIX: original built "&=pn<value>", which is a malformed query
        # parameter; the correct parameter name is "pn", i.e. "&pn=<value>".
        fullurl = url + "&pn=" + str(pn)

        # Download the page HTML...
        html = loadPage(fullurl, filename)
        # ...and persist it to the local disk.
        writeFile(html, filename)
def loadPage(url,filename):
    """
    description:根据url发送请求，获取服务器响应的文件
    :param url: 需要爬取的url地址
    :param filename: 文件名
    :return:
    """
    print "正在下载"  + filename
    headers = {'User-Agent':"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}

    request = urllib2.Request(url, headers=headers)
    response = urllib2.urlopen(request)

    return  response.read()


def writeFile(html, filename):
    """
    description:保存服务器响应文件到本地磁盘文件里
    :param html:服务器响应文件
    :param filename: 那本地磁盘文件名
    :return:
    """
    print "正在储存" + filename
    with open(filename,'w') as f:
        f.write(html)
    print "-" * 20

if __name__ == '__main__':
    main()