import json,requests

# 爬取豆瓣
# url = 'https://movie.douban.com/j/chart/top_list'
# params = {
#     'type': '5',
#     'interval_id':'100:90',
#     'action': '',
#     'start':'0',
#     'limit': '20'
# }
# header = {
#     'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
# }
# response = requests.get(url=url,params=params, headers=header)
# content = response.json()
# #在使用dumps时候，会默认将汉字转成ascii编码格式，因此我们需要手动设置成False
# content1 = json.dumps(content,ensure_ascii=False)
# print(content1)





# 京东商品的爬取–普通爬取框架
# import requests
# url = "https://item.jd.com/2967929.html"
# try:
#     r = requests.get(url)
#     r.raise_for_status()
#     r.encoding = r.apparent_encoding
#     print(r.text[:1000])
# except:
#     print("爬取失败！")




# 亚马逊商品的爬取–通过修改headers字段，模拟浏览器向网站发起请求


# url="https://www.amazon.cn/gp/product/B01M8L5Z3Y"
# try:
#     kv = {'user-agent':'Mozilla/5.0'}
#     r=requests.get(url,headers=kv)
#     r.raise_for_status()
#     r.encoding=r.apparent_encoding
#     print(r.status_code)
#     print(r.text[:1000])
# except:
#     print("爬取失败")


# 爬取博客页面
from urllib.request import urlopen

from bs4 import BeautifulSoup

# Scrape the blog index page and print the target of every hyperlink.
# Use a context manager so the HTTP response is always closed (the
# original leaked the connection by never calling .close()).
with urlopen('https://blog.csdn.net/zzc15806/') as html:  # fetch the page
    bs = BeautifulSoup(html, 'html.parser')  # parse while the response is still open

hyperlink = bs.find_all('a')  # all <a> tags on the page

for h in hyperlink:
    hh = h.get('href')
    # .get() returns None for <a> tags with no href attribute;
    # skip those instead of printing the literal string "None".
    if hh is not None:
        print(hh)
