import urllib.request

# 向 example.com 发起一个简单的 GET 请求
# with urllib.request.urlopen('http://www.example.com') as response:
#     # 读取返回的 HTML 内容
#     html = response.read()
#     # 打印内容 (前100个字符)
#     print(html[:100])
#     # 打印 HTTP 状态码
#     print("状态码:", response.status)
#     # 打印 HTTP 响应头
#     print("响应头:", response.getheaders())


# import urllib.parse
#
# # 定义参数字典
# params = {
#     'q': 'Python urllib',
#     'page': 1
# }
#
# # 将参数字典编码为查询字符串
# query_string = urllib.parse.urlencode(params)
# print("查询字符串:", query_string) # 输出：q=Python+urllib&page=1
#
# # 构造完整的 URL
# url = 'http://www.example.com/search?' + query_string
#
# # 发起请求
# with urllib.request.urlopen(url) as response:
#     content = response.read().decode('utf-8') # 将字节流解码为字符串
#     # ... 处理 content ...


import urllib.parse

# 示例：为请求设置自定义请求头（而不是提交数据）
# import urllib.request
#
# url = 'http://www.example.com'
# # 构造一个自定义的请求对象，而不是直接使用 urlopen
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
#     'Referer': 'http://www.google.com/'
# }
#
# req = urllib.request.Request(url, headers=headers)
#
# with urllib.request.urlopen(req) as response:
#     content = response.read().decode('utf-8')
#     print(content[:500])


# from urllib.parse import urlparse, urlunparse, urljoin
#
# # 解析 URL
# parsed_url = urlparse('https://www.example.com:8080/path/to/page.html?arg1=value1#fragment')
# print("协议:", parsed_url.scheme)    # https
# print("网络地址:", parsed_url.netloc)  # www.example.com:8080
# print("路径:", parsed_url.path)      # /path/to/page.html
# print("查询参数:", parsed_url.query)   # arg1=value1
# print("片段:", parsed_url.fragment)  # fragment
#
# # 将解析后的元组重新组合成 URL
# new_url = urlunparse(('https', 'www.test.com', '/newpath', '', 'arg2=value2', ''))
# print("新 URL:", new_url) # https://www.test.com/newpath?arg2=value2
#
# # 合并相对 URL 和基础 URL（非常有用！）
# base_url = 'http://www.example.com/path/a/'
# relative_url = '../b/page.html'
# absolute_url = urljoin(base_url, relative_url)
# print("绝对 URL:", absolute_url) # http://www.example.com/path/b/page.html


# import urllib.request
# import urllib.error
#
# urls = [
#     'http://www.this-domain-does-not-exist-999999999.com', # 会引发 URLError
#     'http://www.httpstat.us/404' # 会引发 HTTPError (404 Not Found)
# ]
#
# for url in urls:
#     try:
#         with urllib.request.urlopen(url) as response:
#             content = response.read()
#             print(f"成功获取 {url}")
#     except urllib.error.HTTPError as e:
#         print(f"HTTP 错误！URL: {url}, 错误代码: {e.code}, 原因: {e.reason}")
#     except urllib.error.URLError as e:
#         print(f"URL 错误！URL: {url}, 原因: {e.reason}")


import urllib.request
import urllib.parse
import urllib.error

# Complete example: a GET request with query parameters, custom headers,
# and explicit error handling.

# 1. Define the target URL, query parameters, and request headers.
url = 'https://httpbin.org/get'
query_params = {'key1': 'value1', 'key2': '值2'}  # includes non-ASCII text
headers = {'User-Agent': 'Mozilla/5.0 (My Python Bot)'}

try:
    # 2. Prepare the request:
    # percent-encode the query parameters (handles the non-ASCII value)
    # and append them to the URL as a query string.
    encoded_params = urllib.parse.urlencode(query_params)
    full_url = url + '?' + encoded_params
    # Use a Request object so custom headers can be attached.
    req = urllib.request.Request(full_url, headers=headers)

    # 3. Send the request and read the response.
    # A timeout is essential: without one, urlopen() can block forever
    # if the server accepts the connection but never responds.
    with urllib.request.urlopen(req, timeout=10) as response:
        data = response.read().decode('utf-8')  # decode bytes -> str
        print("获取到的数据:")
        print(data)

# 4. Error handling. HTTPError is a subclass of URLError, so it must be
# caught first; a timeout surfaces as URLError (reason is a TimeoutError).
except urllib.error.HTTPError as e:
    print(f"服务器返回了错误状态码: {e.code}")
except urllib.error.URLError as e:
    print(f"无法连接到服务器: {e.reason}")