import requests                     # 第三方库，比urllib更流行更强大
import urllib.request as ur         # 有点麻烦
from bs4 import BeautifulSoup
from lxml import etree
import re
import sys
import os

# Target URLs used throughout the script.
test_url = 'https://www.baidu.com/'
# bdLogo_url = 'https://www.baidu.com/img/bd_logo1.png'
bdLogo_url = "https://www.baidu.com/img/PCtm_d9c8750bed0b3c7d089fa7d55720d6cf.png"      # right-click the Baidu logo in a browser and copy the image address to get this URL
xinlang = 'https://www.sina.com.cn/'

# Boilerplate recipe for fetching a page's HTML source with requests.
# Without a browser-like User-Agent, many sites treat the request as a bot
# and block or degrade it; with a real UA (copy it from about:version in
# Chrome) the request usually passes as normal traffic.
myHeader = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'}
# timeout keeps the script from hanging forever if the server never responds.
bd_response = requests.get(test_url, headers=myHeader, timeout=10)
bd_response.encoding = 'utf-8'      # NOTE: only affects .text decoding; .content written below is the raw bytes
# Open in binary ('wb') mode: .content is bytes (markup, text, symbols mixed),
# so writing bytes avoids any re-encoding step. (.text returns a decoded str.)
with open('baidu.html', 'wb') as f:
    f.write(bd_response.content)

# Download the Baidu logo image and save it as bd.png.
# timeout prevents an indefinite hang; the context manager guarantees the
# file handle is closed even if the write fails (the original leaked it).
bdLogo_resp = requests.get(bdLogo_url, timeout=10)
with open('bd.png', 'wb') as bd_png:
    bd_png.write(bdLogo_resp.content)

# GET with query-string parameters: requests url-encodes `params` and
# appends them to the URL (effectively https://www.baidu.com/s?wd=python).
kw = {'wd': 'python'}
bd_param = requests.get(test_url + 's?', params=kw, headers=myHeader, timeout=10)
bd_param.encoding = 'utf8'      # NOTE: only affects .text; .content written below is raw bytes
# Use a context manager so the handle is always closed (the original leaked it).
with open('bd_param.html', 'wb') as bd_param_resp:
    bd_param_resp.write(bd_param.content)

# Fetch the Sina homepage and save the DECODED text (str) this time.
xl_html = requests.get(xinlang, headers=myHeader, timeout=10)
xl_html.encoding = 'utf8'      # tell requests how to decode .text
# If writing raises a gbk codec error, create the target file with an
# explicit utf-8 encoding, as done here.
with open('xinlang_text.html', 'w', encoding='utf-8') as ff:
    ff.write(xl_html.text)                          # equivalent to the two commented lines below
    # ff.write(xl_html.content.decode())
    # ff.write(xl_html.content.decode('utf-8'))
# No explicit ff.close() needed: the `with` block already closed the file;
# the original's trailing close() call was redundant.

# print(bd_response.text)
# print(bd_response.encoding)
# print(bd_response.content)
# print(bd_response.status_code)      # 这是响应码
# print(bd_response.request.headers)
# print(bd_response.headers)
# print(bd_response.json())       # 不是json报错
# print(bd_response.ok)

# soup_baidu = BeautifulSoup(open('baidu.html', 'r', encoding='utf-8'), 'lxml')
# print(soup_baidu.find('title').string)

# response = ur.urlopen(test_url)
# print(response.getcode())       # 获取状态码
# print(response.geturl())        # 获取url
# # print(response.getheaders(), type(response.getheaders()))       # 获取请求头
# print(response.read().decode('utf-8'))      # 获取请求体，解码获取明文
# print(response.read().decode('utf-8'))      # prints only a blank line: the response body is consumed after one read (queue-like), so to write it to a file this way, comment out this line and the one above first
# # with open('myBaidu.html', 'w') as ff:     # 解码成明文了，所以用w形式打开
# #     ff.write(response.read().decode('utf-8'))
# # ff.close()
# #
# # with open('myBaidu.html', 'r') as file:
# #     lines = [line for line in file.readlines() if line.strip()]     # 去掉文件空行
# #
# # with open('myBaidu.html', 'w') as file:
# #     file.writelines(lines)          # 去掉之后重新写入
#
# ur.urlretrieve(test_url, filename='baidu_response.html')   # 同line27~35

'''
和最上面的公式做对比，上面的更好用
'''
# # 构造请求对象
# request = ur.Request(test_url, headers=myHeader)
# response1 = ur.urlopen(request)
# # print(response1.read().decode('utf-8'))       # 加了请求头，就和单纯的相应不一样了
#
# with open('baidu1.html', 'w', encoding='utf-8') as fff:
#     fff.write(response1.read().decode('utf-8'))
