# -*- coding:utf-8 -*-

# import socket
# import urllib.error
# import urllib.parse
# import urllib.request
import ssl 
# Globally disable HTTPS certificate verification for every urlopen() call in
# this process, so requests to hosts with bad/self-signed certificates do not
# raise "[SSL: CERTIFICATE_VERIFY_FAILED]" (see the note around L201).
# NOTE(review): this is insecure — it turns off TLS verification process-wide;
# prefer passing an explicit context=ssl._create_unverified_context() per call.
ssl._create_default_https_context = ssl._create_unverified_context

"""
response = urllib.request.urlopen('http://www.baidu.com')

print(response.read().decode('utf-8'))

print('--------------------')

# type()方法输出响应的类型.
print(type(response))


print(response.status)  # 获取状态码
print(response.getheaders()) # 获取响应头信息.
print(response.getheader('Server')) #获取服务器web软件.
"""

# data参数

"""
data = bytes(urllib.parse.urlencode({'word':'hello'}),encoding='utf8') 

'''
转字节流采用bytes()方法. 
第一个参数需要是str类型.需要使用urllib.parse模块的urlencode()方法来将字典参数转化为字符串,第二个参数指定编码格式.
'''
response = urllib.request.urlopen('http://httpbin.org/post',data=data)  # httpbin.org提供http请求测试. 测试POST请求.
print(response.read())
"""


# timeout参数
"""
try:
    response = urllib.request.urlopen('http://httpbin.org/get',timeout=0.1)  # 设置超时时间,捕获URLError异常.

    print(response.read())
except urllib.error.URLError as e:
    if isinstance(e.reason,socket.timeout):  #判断异常是socket.timeout类型(超时异常)
        print('timeout')
"""


# Request类构建:
"""
request = urllib.request.Request('http://www.baidu.com')
# 用urlopen()方法发送请求,该方法的参数是Request类型的对象.
response = urllib.request.urlopen(request)
print(response.read().decode('utf-8'))
"""




# Request用法
"""
from urllib import request,parse

url = 'http://httpbin.org/post'
headers = {
    'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36',
    'Host':'httpbin.org'
}
dict = {
    'name':'yuan'
}

# data参数用urlencode()和bytes()方法转成字节流.
data = bytes(parse.urlencode(dict),encoding='utf-8')

req = request.Request(url=url,data=data,headers=headers,method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
"""


##* 高级用法

"""
from urllib.request import HTTPPasswordMgrWithDefaultRealm,HTTPBasicAuthHandler,build_opener
from urllib.error import URLError

'''
首先实例化HTTPBasicAuthHandler对象,其参数是HTTPPasswordMgrWithDefaultRealm对象,它利用add_password()添加进去用户名和密码.这样建立了一个处理验证的Handler.

接下来,利用这个Handler并使用build_opener()方法构建一个Opener. 这个Opener在发送请求时就说明已经验证成功了.

 利用Opener的open()方法打开链接,就完成验证了.
    该脚本报错.[Errno 111] Connection refused
'''
username = 'username'
password = 'password'
url = 'http://127.0.0.1:3000/'

p = HTTPPasswordMgrWithDefaultRealm()
p.add_password(None,url,username,password)
auth_handler = HTTPBasicAuthHandler(p)
opener = build_opener(auth_handler)

try:
    result = opener.open(url)
    html = result.read().decode('utf-8')
    print(html)
except URLError as e:
    print(e.reason)
"""


# 代理
"""
from urllib.error import URLError
from urllib.request import ProxyHandler,build_opener

proxy_handler = ProxyHandler({
    'http': 'http://IP:port',
    'https': 'https://IP:port'
})
# 使用ProxyHandler,其参数是一个字典,键名是协议类型,键值是代理链接.
opener = build_opener(proxy_handler)
try:
    response = opener.open('https://www.baidu.com')
    print(response.read().decode('utf-8'))
except URLError as e:
    print(e.reason)
"""


# Cookies处理
"""
import http.cookiejar,urllib.request

'''
先声明一个CookieJar对象,再利用HTTPCookieProcessor来构建一个Handler,最后利用build_opener()方法构建出opener,执行open()函数即可.
'''
cookie = http.cookiejar.CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
for item in cookie:
    print(item.name+"="+item.value)

### 将cookie输出成文件格式
'''
MozillaCookieJar在生成文件时使用,是CookieJar的子类,可用来处理Cookies和文件相关的事件.可以将Cookies保存成Mozilla型浏览器的Cookies格式.
'''
filename = 'Cookies.txt'
cookie = http.cookiejar.MozillaCookieJar(filename)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
cookie.save(ignore_discard=True,ignore_expires=True)


# # LWPCookieJar也可以读取和保存Cookies,会保存成LWP格式的Cookies文件.
# cookie = http.cookiejar.LWPCookieJar(filename)
filename = 'LWPCookies.txt'
cookie = http.cookiejar.LWPCookieJar(filename)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
cookie.save(ignore_discard=True,ignore_expires=True)


# 从文件LWPCookies.txt中读取并利用Cookies文件:
'''
调用load()方法来读取本地的LWPCookies文件,获取到Cookies的内容,读取Cookies之后使用同样的方法构建Handler和Opener完成操作.
'''
cookie = http.cookiejar.LWPCookieJar()
cookie.load('LWPCookies.txt',ignore_discard=True,ignore_expires=True)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
print(response.read().decode('utf-8'))
"""


# 异常

"""
# 捕获URLError异常.
from urllib import request,error
try:
    response = request.urlopen('https://cuiqingcai.com/index.htm')
except error.URLError as e:
    print(e.reason)


import ssl 
ssl._create_default_https_context = ssl._create_unverified_context
from urllib import request,error

'''
 执行脚本报错: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:841), 导入ssl模块并设置全局变量解决.

捕获HTTPError异常,输出reason,code和headers属性.
'''
try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.HTTPError as e:
    print(e.reason,e.code,e.headers,sep='\n')

'''
因为URLError是HTTPError的父类,可以先选择捕获子类错误,再捕获父类错误.代码如下:
'''
try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.HTTPError as e:
    print(e.reason,e.code,e.headers,sep='\n')
except error.URLError as e:
    print(e.reason)
else:
    print('Request Successfully')
'''
输出结果说明:
先捕获HTTPError,获取它的错误状态码,原因,headers等信息,如果不是HTTPError异常,就会捕获URLError异常,输出错误原因. 最后用else处理正常逻辑.
'''


# ## reason属性返回值

import socket
import urllib.request,urllib.error
# reason属性返回的也有可能是一个对象.
try:
    response = urllib.request.urlopen('https://www.baidu.com',timeout=0.01)
    # 设置超时时间强制抛出timeout异常.
except urllib.error.URLError as e:
    print(type(e.reason))
    if isinstance(e.reason,socket.timeout):
        print('TIME OUT')
    # reason属性的结果是socket.timeout类,用isinstance()方法来判断它的类型,作出异常判断.
"""


# urlparse()
"""
from urllib.parse import urlparse

result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
print(type(result),result)
'''
利用urlparse()方法进行URL解析,先输出了解析结果的类型.
返回结果为一个ParseResult类型的对象.6个部分:scheme(协议),netloc(域名),path(访问路径),params(参数),query(查询条件,用作GET类型的URL),fragment(锚点,用于直接定位页面内部的下拉位置).
'''

from urllib.parse import urlparse

resu = urlparse('www.baidu.com/index.html;user?id=5#comment',scheme='https')
print(resu)

resul = urlparse('http://www.baidu.com/index.html;user?id=5#comment',scheme='https')
print(resul)
'''
说明: scheme参数只有在URL中不包含scheme信息时才有效.如果URL中有scheme信息,就会返回解析出的scheme.
'''

result = urlparse('http://www.baidu.com/index.html;user?id=5#comment',allow_fragments=False)
print(result)
# fragment部分为空,会被解析为path,parameter或者query的一部分.

results = urlparse('http://www.baidu.com/index.html#comment',allow_fragments=False) # url中不包含params和query.
print(results) 
# 当URL中不包含params和query时,fragment会被解析为path的一部分.

# 返回结果为ParseResult是元组.用索引顺序或者属性名获取.
jie = urlparse('http://www.baidu.com/index.html#comment',allow_fragments=False)
print(jie.scheme,jie[0],jie.netloc,jie[1],sep='\n')
"""

# urlunparse()
"""
from urllib.parse import urlunparse

data = ['http','www.baidu.com','index.html','user','a=6','comment']  # 参数data使用了列表类型.
print(urlunparse(data))
"""

# urlsplit
"""
from urllib.parse import urlsplit

aaa = urlsplit('http://www.baidu.com/index.html;user?id=5#comment')
print(aaa)  # 返回结果SplitResult,是元组类型.可用属性和索引获取值.

bbb = urlsplit('http://www.baidu.com/index.html;user?id=5#comment')
print(bbb.scheme,bbb[0])
"""



# urlunsplit()
"""
from urllib.parse import urlunsplit

data = ['http','www.baidu.com','index.html','a=6','comment']
print(urlunsplit(data))
# 将链接各个部分组合成完整链接的方法,传入的参数也是一个可迭代对象,长度必须为5.
"""

# urljoin()
"""
from urllib.parse import urljoin
print(urljoin('http://www.baidu.com','FAQ.html'))
print(urljoin('http://www.baidu.com','https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com?wd=abc','https://cuiqingcai.com/index.php'))
print(urljoin('http://www.baidu.com','?category=2#comment'))
print(urljoin('www.baidu.com','?category=2#comment'))
print(urljoin('www.baidu.com#comment','?category=2'))
'''
说明: base_url提供三项内容scheme,netloc,path. 如果这 3 项在新的链接里不存在,就予以补充;如果新的链接存在,就使用新的链接的部分。而base_url中的params,query 和 fragment是不起作用的.
'''
"""

# urlencode()方法
"""
from urllib.parse import urlencode

params = {
    'name':'yuan',
    'age': 22
}
base_url = 'http://www.baidu.com?'
url = base_url + urlencode(params)
print(url) 
# 声明一个字典来将参数表示出来,调用urlencode()方法将其序列化为GET请求参数. 返回结果说明参数由字典类型转化为GET请求参数.
"""

# parse_qs()
"""
# 有一串GET请求参数,利用parse_qs()方法,可以将它转回字典
from urllib.parse import parse_qs

query = 'name=germey&age=22'
print(parse_qs(query))   # 成功转回字典类型
"""

# parse_qsl()方法
# 用于将参数转化为元组组成的列表.
"""
from urllib.parse import parse_qsl

query = 'name=germey&age=22'
print(parse_qsl(query))  
# 运行结果是列表,列表中的每一个元素都是一个元组,元组的第一个内容是参数名,第二个内容是参数值.
"""


# quote()方法和unquote()方法的使用

"""
# quote()方法
# 可以将内容转化为URL编码的格式. 将中文字符转变为URL编码.
from urllib.parse import quote

keyword = '壁纸'
url = 'https://www.baidu.com/s?wd=' + quote(keyword)
print(url)
# 声明一个中文搜索文字,用quote()方法对其进行URL编码.

# unquote()方法
# 进行URL解码.
from urllib.parse import unquote
url = 'https://www.baidu.com/s?wd=%E5%A3%81%E7%BA%B8'
print(unquote(url))
# 利用unquote()方法可方便实现解码.
"""


## 3. Parsing the robots.txt protocol

from urllib.robotparser import RobotFileParser

'''
先创建RobotFileParser对象,再通过set_url()方法设置robots.txt的链接.
最后利用can_fetch()方法判断网页是否可以被抓取.
'''
# Create a RobotFileParser, point it at the site's robots.txt with set_url(),
# download and parse it with read() (network I/O), then use
# can_fetch(user_agent, url) to ask whether that agent may crawl the URL.
rp = RobotFileParser()
rp.set_url('http://www.jianshu.com/robots.txt')
rp.read()  # fetches robots.txt; returns None — the rules are stored on rp
print(rp.can_fetch('*','http://www.jianshu.com/p/b67554025d7d'))
print(rp.can_fetch('*',"http://www.jianshu.com/search?q=python&page=1&type=collections"))


# Use parse() to read and analyze a manually fetched robots.txt.
from urllib.robotparser import RobotFileParser
from urllib.request import urlopen

# RobotFileParser.parse() expects an iterable of robots.txt LINES, so the
# response body must be decoded to str first and then split on newlines.
# Bug fix: the original wrote .decode('utf-8'.split('\n')) — the parenthesis
# was misplaced, passing the list ['utf-8'] as the encoding argument (a
# TypeError at runtime) and never splitting the body into lines at all.
rp = RobotFileParser()
rp.parse(urlopen('http://www.jianshu.com/robots.txt').read().decode('utf-8').split('\n'))
print(rp.can_fetch('*','http://www.jianshu.com/p/b67554025d7d'))
print(rp.can_fetch('*',"http://www.jianshu.com/search?q=python&page=1&type=collections"))

# Note: with the robotparser module it is easy to decide which pages may be
# crawled and which may not.


