'''Advanced urllib demos: GET/POST, timeouts, error handling, header
spoofing, HTTPS without certificate verification, and robots.txt parsing.'''
from urllib import request,parse,error
import json
# socket is needed so timeout failures (socket.timeout) can be recognised below
import socket
'''认证处理 HTTPS SSL 认证'''
import ssl
# Globally disable HTTPS certificate verification.
# Convenient for a demo script; insecure for production code.
ssl._create_default_https_context = ssl._create_unverified_context

url_string = r'http://httpbin.org'

# --- Plain GET request ---
with request.urlopen(url_string + '/headers') as resp:
    if resp.status == 200:
        body = resp.read().decode('utf-8')
        print(body)
        print(type(body))
        print(json.loads(body))
        # response header information
        print(resp.info())

'''带参数的GET'''
# GET with query-string parameters — example intentionally left commented out:
# with request.urlopen('http://www.baidu.com/s?wd=docker') as f:
# #     if(f.status == 200):
# #         data = f.read().decode('utf-8')
# #         print(data)
# #
# #     print(f.status)

'''POST'''

# Form payload must be URL-encoded and converted to bytes before urlopen()
# will send it as a POST body.
parmes = parse.urlencode({'uname': '弘历'}).encode('utf-8')
with request.urlopen(url_string + '/post', data=parmes, timeout=1) as resp:
    if resp.status == 200:
        # Echo the server's JSON response as a parsed Python object.
        print(json.loads(resp.read().decode('utf-8')))

'''错误处理机制'''
# Error-handling demo: a deliberately tiny timeout (0.1 s) is used to
# provoke a timeout failure.
try:
    with request.urlopen(url_string + '/post', data=parmes, timeout=0.1) as f:
        if 200 == f.status:
            data = f.read().decode('utf-8')
            print(data)
# Bug fix: urlopen() wraps connect-phase timeouts in URLError, but a
# timeout that fires later, during f.read(), is raised directly as
# socket.timeout — the original code missed that case and crashed.
except socket.timeout:
    print('Time out')
except error.URLError as err:
    if isinstance(err.reason, socket.timeout):
        print('Time out')

'''爬虫伪装'''
# Crawler disguise: send a browser-like User-Agent so the request does not
# advertise itself as Python's default urllib client.

url_string = r'http://httpbin.org'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6)',
    'Host': 'httpbin.org',
}

spoofed = request.Request(url_string + '/headers', headers=headers)
with request.urlopen(spoofed) as resp:
    print(json.loads(resp.read().decode('utf-8')))


# NOTE: when accessing an https site, SSL certificate verification may be
# required; to skip verification globally use:
#
#   import ssl
#   ssl._create_default_https_context = ssl._create_unverified_context
#
url_string = r'https://www.python.org'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6)',
    'Host': 'www.python.org',
}

py_req = request.Request(url_string, headers=headers)
with request.urlopen(py_req) as resp:
    # Fetch the page body; printing is left disabled to keep output short.
    page = resp.read().decode('utf-8')
    # print(page)

'''robots.txt'''
from urllib import robotparser

url_string = r'https://www.douban.com'
user_agent = 'Other Spider'
rp = robotparser.RobotFileParser()
# Bug fix: without set_url() + read(), the parser has never loaded any
# rules (last_checked is 0), so can_fetch() unconditionally returns False
# and the printed result is meaningless. Load the site's robots.txt first.
rp.set_url(url_string + '/robots.txt')
rp.read()
wsp_info = rp.can_fetch(user_agent, url_string)
print(wsp_info)