
# https://www.baidu.com/s?wd=%E5%91%A8%E6%9D%B0%E4%BC%A6


# 需求  获取 https://www.baidu.com/s?wd=周杰伦的网页源码

import urllib.request
import urllib.parse
import requests


# Base search URL; the percent-encoded query is appended below.
url = 'https://www.baidu.com/s?wd='

# Browser-like request headers -- the first line of defence against
# anti-crawler checks (Baidu rejects requests with no real User-Agent).
headers = {
    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br, zstd',
    'Connection': 'keep-alive',
    'Referer': 'https://www.baidu.com/'
}

# Percent-encode the non-ASCII query ("周杰伦") so it is a valid URL
# component; relies on urllib.parse from the imports above.
name = urllib.parse.quote('周杰伦')
# print(name)
url = url + name
# print(url)


# Send the GET request.
response = requests.get(url=url, headers=headers)

# Fail fast on HTTP errors (4xx/5xx) instead of silently printing an
# error page as if it were the search results.
response.raise_for_status()

# Response body decoded as text.
# NOTE: for binary payloads (e.g. images) use response.content, not .text.
content = response.text

# Print the page source.
print(content)


# # 请求对象定制
# request = urllib.request.Request(url=url, headers=headers)
#
# # 模拟浏览器向服务器发送请求
# response = urllib.request.urlopen(request)
#
# # 获取响应的内容
# content = response.read().decode('utf-8')
#
# # 打印数据
# print(content)












# import requests
# from bs4 import BeautifulSoup
# import urllib.parse
#
# url = 'https://www.baidu.com/s?wd='
#
# # 设置请求头
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
#     'Accept-Language': 'en-US,en;q=0.9',
#     'Accept-Encoding': 'gzip, deflate, br',
#     'Connection': 'keep-alive',
#     'Referer': 'https://www.baidu.com/'
# }
#
# # 将搜索关键词转换为URL编码格式
# name = urllib.parse.quote('周杰伦')
# full_url = url + name
#
# # 发送GET请求
# response = requests.get(full_url, headers=headers)
#
# # 确保请求成功
# if response.status_code == 200:
#     # 获取响应的HTML内容
#     html_content = response.text
#
#     # 使用BeautifulSoup解析HTML
#     soup = BeautifulSoup(html_content, 'lxml')
#
#     # 打印出解析后的内容（你可以根据需要提取具体的信息）
#     print(soup.prettify())  # 格式化输出HTML内容
#
#     # 示例：提取所有的超链接
#     for link in soup.find_all('a'):
#         print(link.get('href'))
# else:
#     print(f"请求失败，状态码：{response.status_code}")

