import urllib.request
import urllib.parse
# 获取一个get请求
# response = urllib.request.urlopen("http://www.baidu.com")  # 获取网页
# print(response.read().decode('utf-8'))  # 用utf-8进行解码

# 用于测试的网址： httpbin.org

# 获取一个post请求
# response = urllib.request.urlopen("http://www.baidu.com/post")
# print(response.read().decode('utf-8'))
from urllib import request

'''
结果: urllib.error.HTTPError: HTTP Error 404: Not Found
原因：当进行post访问，必须传递一些表单信息
改进：看下面
'''
# import urllib.parse
#
# data = bytes(urllib.parse.urlencode({"name": "eric"}), encoding="utf-8")  # 封装了一个data信息
# response = urllib.request.urlopen("http://httpbin.org/post", data=data)  # 模拟浏览器发出请求，post请求加上封装信息用于模拟用户真实登录
# print(response.read().decode('utf-8'))
'''
运行结果可以和 http://httpbin.org/#/HTTP_Methods/post_post 相比较
结果：{
  "args": {}, 
  "data": "", 
  "files": {}, 
  "form": {
    "name": "eric"
  }, 
  "headers": {
    "Accept-Encoding": "identity", 
    "Content-Length": "9", 
    "Content-Type": "application/x-www-form-urlencoded", 
    "Host": "httpbin.org", 
    "User-Agent": "Python-urllib/3.9", 
    "X-Amzn-Trace-Id": "Root=1-619ee387-7c8b059350fbbefd6ed9e2a1"
  }, 
  "json": null, 
  "origin": "61.188.187.56", 
  "url": "http://httpbin.org/post"
}
'''

# 模拟get
# response = urllib.request.urlopen("http://httpbin.org/get")
# print(response.read().decode('utf-8'))
'''
可以和 http://httpbin.org/#/HTTP_Methods/get_get 比较
结果：{
  "args": {}, 
  "headers": {
    "Accept-Encoding": "identity", 
    "Host": "httpbin.org", 
    "User-Agent": "Python-urllib/3.9", 
    "X-Amzn-Trace-Id": "Root=1-619ee4a8-66f2f51974d899a002453f9f"
  }, 
  "origin": "61.188.187.56", 
  "url": "http://httpbin.org/get"
}
其中  "User-Agent": "Python-urllib/3.9", 直接识别出了为爬虫访问，要伪装需要复制这个键值对为浏览器的
'''

# 超时处理
# try:
#     response = urllib.request.urlopen("http://httpbin.org/get",timeout=0.01)
#     print(response.read().decode('utf-8'))
# except urllib.error.URLError as e:
#     print("time out!")


# 响应头问题
# response = urllib.request.urlopen("http://www.baidu.com")
# print(response.status)  # 接收到200的状态码
# '''
# response = urllib.request.urlopen("http://www.douban.com")
# print(response.status)  # urllib.error.HTTPError: HTTP Error 418: 说明已经被识破为爬虫
# '''
# print(response.getheaders())  # 进入百度 F12 点击网络 刷新百度页面 停止 点击标头 查看响应头 就是和程序结果类似的,User-Agent在最下面


# 不暴露自己为爬虫，伪装自己为浏览器（post）
# url = "http://www.douban.com"
# headers = {
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36 Edg/96.0.1054.29"}
# data = bytes(urllib.parse.urlencode({"name": "eric"}), encoding="utf-8")  # 封装一个data信息
# req = urllib.request.Request(url=url, data=data, headers=headers, method="POST")  # 封装为req对象
# response = urllib.request.urlopen(req)  # 传入req对象
# print(response.read().decode("utf-8"))  # 能够获取到网页源代码


# 不暴露自己为爬虫，伪装自己为浏览器（get）
# url = "http://www.douban.com"
# headers = {
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36 Edg/96.0.1054.29",
# }
# req = urllib.request.Request(url=url,  headers=headers)  # 封装为req对象
# response = urllib.request.urlopen(req)  # 传入req对象
# print(response.read().decode("utf-8"))  # 能够获取到网页源代码


# ==============================================================================
# Crawl through an IP proxy with a pool of User-Agent strings, so each run
# looks like a (randomly chosen) different browser.
import random
import re


url = "https://movie.douban.com/top250?start="

# Proxy IPs.
# NOTE(review): these entries carry no port (expected form "host:port") and
# free proxies go stale quickly — verify/replace before relying on them.
proxylist = [
    {"https": "114.104.139.90"},
    {"https": "223.240.98.231"},
    {"https": "60.168.207.24"},
]
proxy = random.choice(proxylist)
print(proxy)

# Pool of User-Agent headers to mimic different browsers.
# (Original agent1 had a stray trailing ")" inside the string — removed.)
agent1 = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.34"
agent2 = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.34"
agent3 = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.34"
agent4 = "Mozilla/5.0 (Windows NT 6.1; rv:17.0) Gecko/20100101 Firefox/17.0 "
agent5 = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2"
agent6 = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5"
# Pick a User-Agent at random each run, as the section header promises
# (previously the random choice was commented out and agent1 was hard-coded).
agentlist = [agent1, agent2, agent3, agent4, agent5, agent6]
agent = random.choice(agentlist)
print(agent)
headers = {"User-Agent": agent}

# Build a proxy handler, then an opener that routes requests through it.
proxyHandler = request.ProxyHandler(proxy)
opener = request.build_opener(proxyHandler)

# Request object carrying the spoofed browser headers.
req = request.Request(url, headers=headers)

# Send the request and decode the response body.
response = opener.open(req).read().decode("utf-8")

# Extract the page <title> with a non-greedy regex.
pat = r"<title>(.*?)</title>"
data = re.findall(pat, response)

# Guard against pages without a <title> (e.g. a block/captcha page),
# which would previously raise IndexError on data[0].
print(data[0] if data else "no <title> found in response")


