from urllib import request
from urllib import parse
import socket
import urllib
import requests
import re
from bs4 import BeautifulSoup

# Base URL for Baidu, used by the GET demo below.
URL_BAIDU_PATH = 'http://www.baidu.com'
# httpbin.org echo service, used by the GET/POST/header demos.
URL_HTTPBIN_PATH = 'http://httpbin.org'
# Gallery page scraped by the image-crawling demo (cnu.cc "hot portraits" page).
URL_IMG_PATH = 'http://www.cnu.cc/discoveryPage/hot-人像'
# Character encoding used to encode request bodies and decode responses.
URL_DECODE = 'utf-8'

# 以GET方式读取百度网页的内容，即html的内容
# url = URL_BAIDU_PATH
# response = request.urlopen(url, timeout=2)
# print(response.read().decode(URL_DECODE))

# 以POST方式访问网页
# url = URL_HTTPBIN_PATH + '/post'
# data = bytes(parse.urlencode({'word':'hello'}), encoding=URL_DECODE)
# response = request.urlopen(url, data=data)
# print(response.read().decode(URL_DECODE))

# 以GET方式访问网页，处理timeout
# url = URL_HTTPBIN_PATH + '/get'
# try:
#     response = request.urlopen(url, timeout=0.1)
#     print(response.read().decode(URL_DECODE))
# except urllib.error.URLError as e:
#     if isinstance(e.reason, socket.timeout):
#         print('timeout........')

# Set request headers
# headers = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
# "Accept-Encoding": "gzip, deflate, sdch",
# "Accept-Language": "zh-CN,zh;q=0.8",
# "Connection": "close",
# "Cookie": "_gauges_unique_hour=1; _gauges_unique_day=1; _gauges_unique_month=1; _gauges_unique_year=1; _gauges_unique=1",
# "Referer": "http://httpbin.org/",
# "Upgrade-Insecure-Requests": "1",
# "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER"
# }
#
# dict = {
# 'name': 'sky'
# }
#
# url = URL_HTTPBIN_PATH + '/post'
# data = bytes(parse.urlencode(dict),encoding=URL_DECODE)
# req = request.Request(url=url, data=data, headers=headers, method='POST')
# response = request.urlopen(req)
# print(response.read().decode(URL_DECODE))

# requests以GET方式访问
# url = URL_HTTPBIN_PATH + '/get'
# data = {'sky':'hi', 'lotus':'hei'}
# response = requests.get(url, data)
# print(response.text)

# requests以POST方式访问
# url = URL_HTTPBIN_PATH + '/post'
# data = {'sky':'hi', 'lotus':'hei'}
# response = requests.post(url, data)
# print(response.json())


# 爬取网页的所有图片
# content = requests.get(URL_IMG_PATH).text
# pattern = re.compile(r'<a href="(.*?)".*?title">(.*?)</div>',re.S)
# results = re.findall(pattern, content)
# print(results)
#
# for result in results:
#     url, name = result
#     print(url, re.sub('\s', '', name))


# Parse a static HTML document with BeautifulSoup and demonstrate the
# common lookup APIs: tag attribute access, find/find_all, attribute
# subscripting, and text extraction.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'lxml')
# Print the document re-indented as standard, nicely formatted HTML.
print(soup.prettify())
print('-------------------')
print(soup.title)
print(soup.title.string)
print(soup.p)
# find_all is the PEP 8 name; findAll (used originally) is a legacy alias
# with identical behavior. Use find_all consistently, matching the loop below.
print(soup.find_all('p'))
print(soup.p['class'])
# soup.a returns only the FIRST <a> tag in the document.
print(soup.a)
print(soup.find_all('a'))
# The single tag whose id attribute equals 'link3'.
print(soup.find(id='link3'))

# Every hyperlink target (href) in the document.
for link in soup.find_all('a'):
    print(link.get('href'))
# All text content of the document. get_text is a method and must be
# called; the original `print(soup.get_text)` printed the bound-method
# object itself (e.g. "<bound method ...>") instead of the text.
print(soup.get_text())