#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import random

def reload_page(url):
    '''
    Send the HTTP request and return the response page.

    url: the complete URL to request.
    Returns the raw HTML body as a byte string.

    A User-Agent header is chosen at random from a small pool so that
    repeated requests do not all present the same client signature.
    '''
    print("正在下载:" + url)
    ua_list = [
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)"]

    ua = random.choice(ua_list)

    request = urllib2.Request(url, headers={"User-Agent": ua})
    response = urllib2.urlopen(request)
    try:
        html = response.read()
    finally:
        # Fix: the original never closed the response, leaking the
        # underlying socket on every download.
        response.close()
    return html

def write_page(url, page):
    '''
    Download a page and save it to a local HTML file.

    url: the complete URL to fetch.
    page: the page number; it is embedded in the output file name
          (text/the-page<page>.html) so each page gets its own file.
    '''
    print("正在写入:" + url + "的第" + str(page) + "页")

    # Fetch first, then decide where to put it.
    html = reload_page(url)
    target = "text/the-page" + str(page) + ".html"

    with open(target, 'w') as out:
        out.write(html)


def spider_tieba(tieba_name, star_page, end_page):
    '''
    Baidu Tieba spider: download a range of pages for one forum topic.

    tieba_name: the forum (tieba) topic name to search on Baidu Tieba.
    star_page: first page number to download (1-based, inclusive).
    end_page: last page number to download (inclusive).
    '''
    tieba_url = "http://tieba.baidu.com/f?"
    # The 'kw' query part does not change per page — build it once,
    # outside the loop (the original re-encoded it on every iteration).
    tieba_url_name = tieba_url + urllib.urlencode({'kw': tieba_name})
    for page in range(int(star_page), int(end_page) + 1):
        # Fix: Tieba's 'pn' offset is 0 for page 1, 50 for page 2, ...
        # The original used page*50, which skipped the first page and
        # fetched one page past the requested end.
        pn = (page - 1) * 50
        full_url = tieba_url_name + '&' + urllib.urlencode({'pn': pn})
        write_page(full_url, page)

    print("完成 百度贴吧  " + tieba_name + " 第 " + str(star_page) + " 到 " + str(end_page) + "  的下载！")

if __name__ == "__main__":
    # Collect the crawl parameters interactively, then run the spider.
    topic = raw_input("请输入贴吧名称-》》：")
    first_page = raw_input("请输入开始的页码：")
    last_page = raw_input(" 请输入结束的页码：")
    spider_tieba(topic, first_page, last_page)




