#!/usr/bin/env python
# -*- coding:utf-8 -*-


import urllib
import urllib2

def send_request(url):
    """Send an HTTP GET request and return the raw response body.

    url: the URL to fetch.
    Returns the response body as a byte string.
    Raises urllib2.URLError / urllib2.HTTPError on network failure.
    """
    print("[LOG]:正在发送 %s" % url)
    # Spoof a desktop browser UA so the server does not reject the crawler.
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"}
    request = urllib2.Request(url, headers=headers)
    response = urllib2.urlopen(request)
    try:
        return response.read()
    finally:
        # Always release the underlying socket, even if read() raises;
        # the original leaked the connection on every call.
        response.close()

def write_page(html, file_name):
    """Write raw page data to a file on disk.

    html: raw page content as a utf-8 byte string (as returned by
          send_request).
    file_name: name of the destination file.
    """
    print("[LOG]:正在写入 %s" % file_name)
    # Binary mode: persist the fetched bytes exactly as received.
    # Text mode ("w") would apply platform newline translation on
    # Windows and could corrupt the HTML payload.
    with open(file_name, "wb") as f:
        f.write(html)

def start_work(tieba_name, begin_page, end_page):
    """Tieba crawler scheduler: builds the query string for each page,
    fetches it, and writes the result to disk.

    tieba_name: name of the tieba (forum) to crawl
    begin_page: first page number, inclusive
    end_page:   last page number, inclusive
    """
    # The listing endpoint is fixed; only the query string varies per page.
    base_url = "http://tieba.baidu.com/f?"

    page = begin_page
    while page <= end_page:
        # Each listing page covers 50 posts; "pn" is the zero-based offset.
        query = urllib.urlencode({"kw": tieba_name, "pn": (page - 1) * 50})
        full_url = base_url + query

        html = send_request(full_url)

        # One output file per page, e.g. "<tieba_name>3.html".
        write_page(html, tieba_name + str(page) + ".html")
        page += 1

if __name__ == "__main__":
    # Gather crawl parameters interactively.
    # Python 2: raw_input returns the entered line as a str.
    tieba_name = raw_input("请输入需要抓取的贴吧名:")
    # int() raises ValueError on non-numeric input, aborting the run
    # with a clear traceback rather than crawling garbage pages.
    begin_page = int(raw_input("请输入需要抓取的起始页:"))
    end_page = int(raw_input("请输入需要抓取的结束页:"))

    start_work(tieba_name, begin_page, end_page)



