#encoding=utf-8

#python3的爬取  注释的地方为python2代码
#https://tieba.baidu.com/f?kw=%E8%BF%90%E7%AD%B9%E5%AD%A6&ie=utf-8&pn=50

#import urllib
import urllib.request
#import urllib2
import urllib.parse
import random


# Pool of desktop-browser User-Agent strings; send_request() picks one at
# random per request so the crawler looks less like an automated client.
USER_AGENT_LIST = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
]

def send_request(keyword=None, page=None):
    """Fetch one listing page of a Baidu Tieba forum and return the raw HTML.

    Args:
        keyword: Forum (tieba) name to query. If None, the user is prompted
            interactively (original behavior, so existing callers still work).
        page: 1-based page number to fetch. If None, the user is prompted.

    Returns:
        bytes: the raw HTTP response body.

    Raises:
        urllib.error.URLError: on network failure or when the 5-second
            timeout is exceeded.
    """
    # -------------------- build the request URL --------------------

    # A URL queue could be maintained here to crawl multiple pages.
    base_url = "https://tieba.baidu.com/f?"

    # Fall back to interactive prompts when no arguments were supplied.
    if keyword is None:
        keyword = input("请输入要查询的贴吧名:")
    if page is None:
        page = int(input("请输入查询贴吧第几页内容:"))

    # Tieba paginates 50 threads per page; 'pn' is the thread offset.
    # urlencode percent-escapes the (possibly non-ASCII) forum name.
    request_str = urllib.parse.urlencode({'kw': keyword, 'pn': page * 50})

    full_url = base_url + request_str

    # -------------------- build the request object --------------------

    # Rotate the User-Agent so requests look less uniform/bot-like.
    user_agent = random.choice(USER_AGENT_LIST)

    request = urllib.request.Request(full_url)
    request.add_header("User-Agent", user_agent)

    # -------------------- send the request --------------------

    # timeout=5: abort this HTTP request if it takes longer than 5 seconds.
    # 'with' guarantees the response is closed even if read() raises,
    # fixing the resource leak in the original code.
    with urllib.request.urlopen(request, timeout=5) as response:
        # ---------------- return the response body ----------------
        return response.read()


if __name__ == "__main__":
    # Fetch the requested forum page (prompts the user for parameters).
    page_bytes = send_request()

    # -------------------- persist the data --------------------

    # Binary mode: send_request() returns raw bytes, not decoded text.
    with open('task.html', 'wb') as out_file:
        out_file.write(page_bytes)
    # The saved page can now be parsed/processed further.
