#爬虫
import urllib
from http import cookiejar

import requests
from fake_useragent import UserAgent

# #发送响应请求
# url = 'http://www.baidu.com'
# response = requests.get(url)  #通过get命令发送请求
# #获取相应内容
# print(response.text)   #获取响应内容，会自动采用一种编码格式有时候会乱码
# print(response.status_code)  #获取响应状态码
# #获取响应内容，以二进制编码
# print(response.content.decode())

#从网页获取图片并保存
# url ='https://img0.baidu.com/it/u=1591332046,3827572816&fm=253&fmt=auto&app=138&f=JPEG?w=800&h=1600'
# response = requests.get(url)
# #以写文件的方式将图片写入文件
# with open("1.jpg", "wb") as f:
#     f.write(response.content)

#完整爬取数据需要携带请求头
# url = 'http://www.baidu.com'

# response = requests.get(url)
# response.encoding = 'utf-8'  #设置响应数据的编码格式
# print(len(response.text))

# url = 'http://www.baidu.com'
# #设置请求头
# #携带请求头让网页认为是浏览器在进行访问
# headers = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0"}
# response = requests.get(url, headers=headers)
# response.encoding = 'utf-8'
# print(len(response.text))

#使用同一个user-agent重复访问某一个网页可能会被认为是爬虫
#user-agent池防止反爬
import random
# UAlist = [
#     "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0",
#     "Mozilla/5.0 (iPad; CPU OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1 Edg/131.0.0.0",
#     "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36 Edg/131.0.0.0"
# ]
# print(random.choice(UAlist))

#使用模块防止反爬
# from fake_useragent import UserAgent
# print(UserAgent().random)
#
# url = 'http://www.baidu.com'
# headers = {'User-Agent': UserAgent().random}
# response = requests.get(url, headers=headers)
# print(response.status_code)

#明文转换
# from urllib.parse import quote,unquote
# #明文转密文
# print(quote("参数"))
# #密文转明文
# print(unquote("%E5%8F%82%E6%95%B0"))

# #携带参数发送请求
# name =  input("请输入关键字  ")
# #使用f格式化搜索想要的内容
# url = f'https://www.baidu.com/s?wd={name}'
# headers = {u'User-Agent': UserAgent().random}
# r = requests.get(url, headers=headers)
# print(r.text)


# name =  input("请输入关键字  ")
# kw = {"wd":name}  #携带参数进行访问
# #使用f格式化搜索想要的内容
# url = f'https://www.baidu.com/s?'
# headers = {u'User-Agent': UserAgent().random}
# r = requests.get(url, headers=headers,params=kw)
# print(r.text)


#获取酷狗图片
# url = 'https://p3fx.kgimg.com/stdmusic/240/20241218/20241218111701254519.jpg'
# r = requests.get(url, headers={'User-Agent': UserAgent().random})
#
# with open('酷狗.jpg', 'wb') as f:
#     f.write(r.content)

# url = 'https://sharefs.tx.kugou.com/202412191821/2a4731a1bb13595d43aa32a08d69c13d/v3/a8ee3a09a1afc7d83a3c2ef24b9281f0/yp/full/ap1000_us0_pi409_mx412060334_s2805069839.mp3'
# r = requests.get(url, headers={'User-Agent': UserAgent().random})
#
# with open('酷狗.mp3', 'wb') as f:
#     f.write(r.content)

# url = 'https://mvwebfs.kugou.com/202412191824/feaed1e42f4c54ccf80e4198a96d3cd3/KGTX/CLTX002/b2a65c14f0cc288c7974f57d6c5bd220.mp4'
# r = requests.get(url, headers={'User-Agent': UserAgent().random})
# with open('酷狗.mp4', 'wb') as f:
#      f.write(r.content)

# url ='https://webfs.kugou.com/202505232023/64b5efccee611a882730f4569d7fc7cd/v3/ceb36c72f96ccd961c105f4d0cd6edb0/yp/full/ap1014_us1137539070_mii0w1iw8z2ai2iphcu80ooo2ki81120_pi406_mx27544497_s2308307180.mp3'
# response = requests.get(url,headers={'User-Agent':UserAgent().random})
# with open("落花情.mp3","wb") as f:
#     f.write(response.content)
#
# Download an mp3 from a Kugou CDN link and save it to disk.
# NOTE(review): this is a signed, time-limited URL — it will expire; grab a
# fresh link from the player page before re-running.
url = 'https://webfs.kugou.com/202505271039/75e4854916373e5aad13c4ada4430bcd/v3/2c16392bec6b121ef222eaeb07e42eb1/yp/full/ap1014_us0_mi7428485b8538c8b3ce344f1bc9256485_pi406_mx89363940_s103214548.mp3'
# Random User-Agent so the request looks like a normal browser visit;
# timeout prevents the script from hanging forever on a dead CDN node.
response = requests.get(url, headers={'User-Agent': UserAgent().random}, timeout=10)
# Fail loudly on 4xx/5xx instead of silently saving an HTML error page as .mp3.
response.raise_for_status()
# Binary write: response.content is raw bytes, no decoding involved.
with open("雨边.mp3", "wb") as f:
    f.write(response.content)
#获取单页贴吧
# url = 'https://tieba.baidu.com/f?ie=utf-8&kw=%E6%9D%A8%E6%B4%8B&fr=search'
# r = requests.get(url, headers={'User-Agent': UserAgent().random})
#
# with open('杨洋.html', 'wb') as f:
#      f.write(r.content)

#获取翻页贴吧数据
# url = 'https://tieba.baidu.com/f?'
# keyword = input('请输入关键字')
# page = int(input("请输入获取的页数"))
#
# for i in range(page):
#     params = {
#         'kw': keyword,
#         'pn': i *50
#     }
#     r = requests.get(url, params=params, headers={'User-Agent': UserAgent().random})
#
#     with open(f'{keyword}{i+1}.html','wb') as f:
#           f.write(r.content)


#面向对象实现翻页爬虫
# class Tieba:
#     def __init__(self):
#         #初始化url 和请求头
#         self.url = 'https://tieba.baidu.com/f?'
#         self.headers = {'User-Agent': UserAgent().random}
#     #发送请求,携带参数发送请求
#     def send(self,params):
#         r = requests.get(self.url, params=params, headers=self.headers)
#         #返回爬取的数据
#         return r.text
#     #保存数据
#     def save(self,page,con):
#         with open(f'{page+1}.html','w',encoding='utf-8') as f:
#             f.write(con)
#     #运行代码
#     def run(self):
#         keyword = input("请输入关键词")
#         pages = int(input("请输入抓取页码数量"))
#         for page in range(pages):
#             params = {
#                 'kw': keyword,
#                 'pn':page*50
#             }
#             data = self.send(params)
#             self.save(page,data)
# t = Tieba()
# t.run()

#cookie模拟用户登录

# url = 'https://m.acfun.cn/'
# headers = {
#     'User-Agent': UserAgent().random,
#     'cookie': "_did=web_53133760D24DC34; _did=web_53133760D24DC34; csrfToken=uU298Nl26V3XrkVVFMA88VuB; webp_supported=%7B%22lossy%22%3Atrue%2C%22lossless%22%3Atrue%2C%22alpha%22%3Atrue%2C%22animation%22%3Atrue%7D; Hm_lvt_2af69bc2b378fb58ae04ed2a04257ed1=1734609998; HMACCOUNT=D0C08FDD0CBA6C65; did=web_7db96112833ec9011fb35a55aba0e7d6; didv=1734610014843; lsv_js_player_v2_main=ca85g8; cur_req_id=9285005219961E86_self_abdcaf400c51fe7802f7a3f4e3a70cd1; cur_group_id=9285005219961E86_self_abdcaf400c51fe7802f7a3f4e3a70cd1_0; ac_username=%E4%B8%8D%E5%AE%9C%E5%A6%84%E8%87%AA%E8%8F%B2%E8%96%84%E7%9A%84%E8%B5%B5%E5%AE%B6%E8%80%81; ac_userimg=https%3A%2F%2Fimgs.aixifan.com%2Fstyle%2Fimage%2FdefaultAvatar.jpg; acPostHint=8389ef54e68116d44f755c4ae35fb6830480; acPasstoken=ChVpbmZyYS5hY2Z1bi5wYXNzdG9rZW4SYP4S_GDBTvXlvcna-o-rhxH8cERUil--dxjmxzivHMWzoL0HWdifxdWW0Ul_foJe3OY148MFnV0uYf-sRvEXV-x2ZxtsCNB6cWNrnb1T7LjKQ3gsoqIi5zoX-3DR71lWMRoSBhnl2gCmU6FTL9Aq8R6fTHHuIiB-H46ypuNtNaoljM3kMMmH3WFOZAEZZx_I5lj-jntOXSgFMAE; auth_key=63559396; Hm_lpvt_2af69bc2b378fb58ae04ed2a04257ed1=1734610058"
# }
# r = requests.get(url, headers=headers)
# print(r.text)

#金山翻译
# import json
# url ='https://ifanyi.iciba.com/index.php?c=trans'
# word = input("请输入中文")
# #使用post请求需要传入请求数据给参数data
# post_data = {
#     'from': 'zh',
#     'to': 'en',
#     'q': word
# }
# r = requests.post(url, data=post_data,headers={'User-Agent': UserAgent().random})
# # print(r.text)
# #将json格式数据转化为字典格式
# dict = json.loads(r.text)
# print(dict['out'])

# 创建session对象，session会自动处理cookie
# session = requests.Session()
# r = session.get('https://www.python.org')
# print(r.text)

#cookie池

#代理ip
#正向代理保护客户端  反向代理保护服务器
#透明代理，客户端ip和服务器ip都知道
#匿名代理，客户端ip看的到，服务器ip看不到
#高匿代理，客户端和服务器的ip都看不到

#使用代理ip
# url = 'http://www.baidu.com'
#
# #需要使用有效的代理ip
# proxies = {
#     # 'http': 'http://127.0.0.1:1080',
# }
# r = requests.get(url, proxies=proxies,headers={'User-Agent': UserAgent().random})
# print(r.content.decode())

