import requests
from bs4 import BeautifulSoup
# from lxml import xpath
# url = 'https://www.baidu.com/'
# params = {
#     'tn': '15007414_3_dg'
# }
# rquest = requests.get(url, params=params)
# rquest.encoding = 'utf-8'
# print(rquest.text)


# 二进制
# ur = 'https://pss.bdstatic.com/static/superman/img/topnav/newxueshuicon-a5314d5c83.png'
# Shared HTTP request headers used by the (currently commented-out)
# request examples below. A browser User-Agent avoids trivial bot blocking;
# the Cookie carries a logged-in Baidu session for the authenticated examples.
#
# NOTE(review): the Cookie value embeds real session credentials
# (BDUSS / BAIDUID etc.) — hard-coded secrets should not be committed to
# source control; load them from an environment variable or config instead.
header = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36 SLBrowser/9.0.6.5061 SLBChan/105 SLBVPV/64-bit'
    ),
    'Cookie': (
        'BIDUPSID=1078281AB1E0B74960C10778B38ADE12; PSTM=1644974686; indexPageSugList=%5B%22%E9%A9%AC%E5%8C%96%E8%85%BE%E7%85%A7%E7%89%87%22%2C%22%E9%A9%AC%E4%BA%91%E7%85%A7%E7%89%87%22%2C%22%E4%BB%8B%E7%BB%8D%E8%8B%B1%E9%9B%84%E7%9A%84ppt%E7%9A%84%E7%BB%93%E6%9D%9F%E9%A1%B5%E9%9D%A2%22%5D; BDUSS=U1ZSGYyTzl5T1RzMXprcjJ3aUYzQXVDVHlmU1F0ZX5JaUNlemFrZ3ZXSWxQSlpuRVFBQUFBJCQAAAAAAQAAAAEAAAD2p56EYmR3cHNiY2MAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACWvbmclr25nd; BAIDUID=978EFE03A14EA9A66CE6E87187D68F47:FG=1; BDUSS_BFESS=U1ZSGYyTzl5T1RzMXprcjJ3aUYzQXVDVHlmU1F0ZX5JaUNlemFrZ3ZXSWxQSlpuRVFBQUFBJCQAAAAAAQAAAAEAAAD2p56EYmR3cHNiY2MAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACWvbmclr25nd; BAIDUID_BFESS=978EFE03A14EA9A66CE6E87187D68F47:FG=1; BA_HECTOR=2000808hag8h8gaha08l8g058l2h021k7m0sl24; ZFY=po7uSSSbFPt83rULr4R5v4mhSUlcA8nTQlrDhWGORVE:C; delPer=0; PSINO=5; BDRCVFR[HiwRUErS-xt]=mk3SLVN4HKm; H_PS_PSSID=60272_62327_63140_63326_63798_63881_63936_63955_63972_63274_63992_64011_64026_64058_63817_64085_64127; BDORZ=FFFB88E999055A3F8A630C64834BD6D0; H_WISE_SIDS=62327_63798_63936_64127; arialoadData=false; ab_sr=1.0.1_Mjg3ZDNkNzE1YmQyNmJiODcwZjY4ODlmN2FiMmQ4YmRhOTExNGYwOTE2NTZmYTQ1NjhiZTY0YzNjNmVlNzQxYTAyOTY2NjQ0NjAwOTk0ZmQxZWFlYjlhNTk0ZjBiMzFmM2FlMmQ4NGZmYjM1N2JhMzA0ZmU2MzVkNWM5NzJlY2IxMWVlNjk0ZDFkMmFiZjViNjIzNDVlMDAxY2MyZTVjNA=='
    ),
}
# r = requests.get(ur,headers=header)
# print(r.content)
# with open('newxueshuicon-a5314d5c83.png','wb') as f:
#     f.write(r.content)

# json 请求
# url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&word=%E5%9B%BE%E7%89%87&ie=utf-8&fp=result&fr=&ala=0&applid=10038263699024034762&pn=60&rn=30&nojc=0&gsm=3c&newReq=1'
#
# reques = requests.get(url, headers=header)
# print(reques.json()['data']['images'][0]['objurl'])

#
# url = 'http://httpbin.org/post'
# data = {
#     'name': 'x',
#     'age': 18
# }
# r = requests.post(url, headers=header, data=data)
# print(r.text)
#

# s = requests.Session()    # 可以使用连续的请求，会保存之前的cookie
#
# url = 'http://httpbin.org/cookies/set/sessioncookie/12334456'
# html = s.get(url)
# print(html.text)
# r = s.get('http://httpbin.org/cookies')
# print(r.text)
# search = input('你要查找的内容')
# for i in range(0, 64, 8):
#     url = f'https://www.baidu.com/s?wd={search}&pn={i}'
#     request = requests.get(url, headers=header)
#     soup = BeautifulSoup(request.text, 'lxml')
#     # divs = soup.select('div#content_left div.result.c-container.xpath-log.new-pmd h3 a')
#     # urls = soup.xpath('//div[@id="content_left"]/div[contains(@class,"result")]')
#     for url in urls:
#         print('跳转链接', url)
#         # print('标题是', div['title'])

# url = 'https://news.163.com/'
# response = requests.get(url, headers=header)
# soup = BeautifulSoup(response.text, 'html.parser')
# lis = soup.find_all(name='ul',class_='top_news_ul')
# for li in lis:
#     for a in li.find_all('a'):
#         print(a.get('href'))
#

