#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import urllib
import urllib2
def reload_page(url,page):
    '''
    Fetch the given URL and return the raw page content.

    url: the complete URL of the page to request
    page: page number, used only in the progress message
    :return: the raw HTML body of the response (str)
    '''

    # Rotate through several User-Agent strings so repeated requests
    # look less like an obvious bot and are less likely to be blocked.
    ua_list = [
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)"]

    ua = random.choice(ua_list)
    request = urllib2.Request(url, headers={"User-Agent":ua})
    response = urllib2.urlopen(request)
    try:
        html = response.read()
    finally:
        # Always release the underlying socket, even if read() raises;
        # the original version leaked the connection object.
        response.close()
    print("完成 " + str(page) + " 页的下载！")

    return html


def write_page(html, page):
    '''
    Write the downloaded page content to a local HTML file.

    html: the content to write to the file
    page: page number, used to build the output file name
    :return: None
    '''
    file_name = "text/the_page_" + str(page) + ".html"
    # Create the output directory on first use; otherwise open() fails
    # with a missing-directory IOError when "text/" does not exist.
    dir_name = os.path.dirname(file_name)
    if dir_name and not os.path.isdir(dir_name):
        os.makedirs(dir_name)
    with open(file_name,'w') as f:
        f.write(html)

    print("完成 " + file_name + " 的创建！")

def tieba_spider(name,start_page,end_page):
    '''
    Baidu Tieba spider: download every result page of the given tieba
    in the inclusive range [start_page, end_page].

    name: tieba (forum) name to query
    start_page: first page number to fetch (1-based; str or int)
    end_page: last page number to fetch, inclusive (str or int)
    :return: None
    '''
    url = "http://tieba.baidu.com/f?"

    for page in range(int(start_page), int(end_page)+1):
        # Baidu Tieba paginates with pn = (page - 1) * 50: page 1 is
        # pn=0, page 2 is pn=50, ...  The previous "page * 50" skipped
        # the first page and fetched one page past the requested range.
        full_url = url + urllib.urlencode({'kw':name}) + "&pn=" + str((page - 1) * 50)
        content = reload_page(full_url,page)
        write_page(content, page)


if __name__=="__main__":
    # Interactive entry point: ask for the tieba name and the page
    # range, then run the spider over that range.
    target = raw_input("输入要查询的贴吧名称：")
    first = raw_input("输入起始页：")
    last = raw_input("输入结束页：")

    tieba_spider(target, first, last)

    print("完成  " + target + "  的下载！")