'''urllib进阶'''
from urllib import request,parse,error
import json
# 引入超时机制
import socket
url_string = r'http://httpbin.org'

# --- Plain GET ---
# Fetch /headers and show the raw body, its Python type, the parsed JSON,
# and the response's header block.
with request.urlopen(url_string + '/headers') as resp:
    if resp.status == 200:
        body = resp.read().decode('utf-8')
        print(body)
        print(type(body))
        print(json.loads(body))
        # Response header information
        print(resp.info())
# --- GET with query parameters ---
# (the query string below is not a well-formed GET-parameter example)
# with request.urlopen('http://www.baidu.com/s?wd=docker') as f:
#     if f.status == 200:
#         data = f.read().decode('utf-8')
#         print(data)
#         print(f.status)

# --- POST ---
# Form-encode the payload; urlopen() switches to POST when data= is given.
parmes = parse.urlencode({'uname': '弘历'}).encode('utf-8')
with request.urlopen(url_string + '/post', data=parmes, timeout=1) as resp:
    if resp.status == 200:
        reply = json.loads(resp.read().decode('utf-8'))
        print(reply)
# --- Error handling ---
# An aggressively short timeout makes the request fail with socket.timeout,
# which urllib wraps in URLError and exposes via .reason.
try:
    with request.urlopen(url_string + '/post', data=parmes, timeout=0.1) as resp:
        if resp.status == 200:
            print(resp.read().decode('utf-8'))
except error.URLError as err:
    if isinstance(err.reason, socket.timeout):
        print('Time out')
# --- Spoofing crawler headers ---
# BUG FIX: the original assigned to the misspelled name ``url_sring``, making
# the assignment dead code (the Request below reads ``url_string``); corrected
# to ``url_string``.
url_string = r'http://httpbin.org'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
    'Host': 'httpbin.org'
}
# A Request object lets us attach custom headers before opening the URL.
req = request.Request(url_string + '/headers', headers=headers)
with request.urlopen(req) as f:
    data = f.read().decode('utf-8')
    print(json.loads(data))
# --- robots.txt ---
from urllib import robotparser

robots_site = r'https://www.douban.com'
user_agent = 'other Spider'
rp = robotparser.RobotFileParser()
# BUG FIX: the original never called set_url()/read(), so no robots.txt rules
# were ever loaded and can_fetch() unconditionally returned False. Fetch and
# parse the site's robots.txt before asking it anything.
rp.set_url(robots_site + '/robots.txt')
rp.read()
wsp_info = rp.can_fetch(user_agent, robots_site)
print(wsp_info)