import requests
import re
# Imports used for suppressing SSL warnings (see the SSL section below)
import urllib3
import logging
# r = requests.get('https://www.baidu.com/')
# print(type(r))
# print(r.status_code)
# print(type(r.text))
# print(r.text[:100])
# print(r.cookies)

# data = {
#     'name': 'germey',
#     'age': 25
# }
# r = requests.get('https://www.httpbin.org/get', params=data)
# # print(r.text)
# print(r.json())

# 提取网页
# r = requests.get('https://ssr1.scrape.center/')
# pattern = re.compile('<h2.*?>(.*?)</h2>',re.S)
# titles = re.findall(pattern,r.text)
# print(titles)

# 提取二进制数据
# r = requests.get('https://scrape.center/favicon.ico')
# print(r.text)
# print(r.content)
# with open('favicon.ico', 'wb') as f:
#     f.write(r.content)

# requests.codes内置状态码
# r = requests.get('https://ssr1.scrape.center/')
# exit() if not r.status_code == requests.codes.ok else print('Request Successfully')


# 文件上传
# files = {'file': open('favicon.ico', 'rb')}
# r = requests.post('https://www.httpbin.org/post', files=files)
# print(r.text)

# 获取cookie
# r = requests.get('https://www.baidu.com')
# print(r.cookies)
# for key, value in r.cookies.items():
#     print(key + '=' + value)

# session会话
# s = requests.Session()
# s.get('https://www.httpbin.org/cookies/set/number/123456789')
# r = s.get('https://www.httpbin.org/cookies')
# print(r.text)

# SSL证书验证
# response = requests.get('https://ssr2.scrape.center/')
# print(response.status_code)

# response = requests.get('https://ssr2.scrape.center/', verify=False)
# 会出现警告
# print(response.status_code)
# 忽略警告的方式,见导入区域,一引入urllib3，二引入logging
# urllib3.disable_warnings()
# response = requests.get('https://ssr2.scrape.center/', verify=False)
# print(response.status_code)
# 以捕获警告到日志的方式忽略警告
# logging.captureWarnings(True)
# response = requests.get('https://ssr2.scrape.center/', verify=False)
# print(response.status_code)

# 超时设置
# r = requests.get('https://www.httpbin.org/get', timeout=(5,30))
# print(r.status_code)

# 身份认证
from requests.auth import HTTPBasicAuth
# r = requests.get('https://ssr3.scrape.center/', auth=HTTPBasicAuth('admin', 'admin'))
# print(r.status_code)
# 简写模式
# r = requests.get('https://ssr3.scrape.center/', auth=('admin', 'admin'))
# print(r.status_code)


# 代理设置
# NOTE: this originally raised an error — the 'https' proxy URL was missing its
# scheme ('http://' prefix), which requests rejects with InvalidProxyURL.
# The book did not execute this snippet; the print below was added by me.
# Proxy configuration for requests.
# Each proxy URL MUST include a scheme ('http://' or 'https://');
# the original 'https' entry omitted it, which makes requests raise
# requests.exceptions.InvalidProxyURL.
proxies = {
    'http': 'http://167.99.191.142:3128',
    'https': 'http://51.81.80.54:80',
}

try:
    # Public demo proxies are frequently dead or slow: bound the wait with a
    # timeout and report failures instead of crashing the whole script.
    r = requests.get('https://www.httpbin.org/get', proxies=proxies, timeout=10)
    print(r.status_code)
except requests.exceptions.RequestException as e:
    # Covers ProxyError, ConnectTimeout, SSLError, etc.
    print(f'Proxy request failed: {e}')

