# Source: iizhiApi/utils.py (Hugging Face upload by rogerxavier, commit 33a670b, 3.03 kB)
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
# Shared HTTP session: reused by every request below so the login cookies persist.
session = requests.Session()
# Purchase request (test).
urlBase = 'https://jingling.bifangpu.com'
purchaseBase = urlBase+"/api/resource/purchase"
# Cookie expiry info: jinglingpan:sessid=d9aafa51-49a6-4c1f-a132-79ae613178f8; path=/; expires=Fri, 02 Aug 2024 12:56:01 GMT, jinglingpan:sessid.sig=bqFPySSp2PDTlM9vvIyHAlZQHPw; path=/; expires=Fri, 02 Aug 2024 12:56:01 GMT
# NOTE(review): hard-coded session cookies — requests will fail once they expire (see date above).
cookie={"jinglingpan:sessid": "d9aafa51-49a6-4c1f-a132-79ae613178f8", "jinglingpan:sessid.sig": "bqFPySSp2PDTlM9vvIyHAlZQHPw"}
session.cookies = requests.utils.cookiejar_from_dict(cookie)
# Browser-identical request headers captured from a real Chrome session, so the
# site treats these requests as a logged-in browser visit.
headers = {
"authority": "jingling.bifangpu.com",
"method": "GET",
"path": "/resource/detail/b01940f0f34e6ca91f68b258362d50f1",
"scheme": "https",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"Accept-Encoding": "gzip, deflate, br, zstd",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "max-age=0",
"Dnt": "1",
"If-None-Match": "\"5b56-ChT6C0U9s3iYKFMBV41XLcqXxRc\"",
"Priority": "u=0, i",
"Sec-Ch-Ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
"Sec-Ch-Ua-Mobile": "?0",
"Sec-Ch-Ua-Platform": "\"Windows\"",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-User": "?1",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
# A response of {"code": 10602} means the resource has already been purchased.
def try_puchase(fid: str, session, url: str = None, timeout: float = 15) -> dict:
    """Attempt to purchase the resource identified by *fid*.

    NOTE(review): the function name keeps the original 'puchase' typo so
    existing callers are not broken.

    :param fid: resource id to purchase.
    :param session: a ``requests.Session``-like object exposing ``.post()``.
    :param url: purchase endpoint; defaults to the module-level ``purchaseBase``.
    :param timeout: seconds before the HTTP request is aborted — requests has
        no default timeout and would otherwise block forever on a dead server.
    :return: decoded JSON response; ``{"code": 10602}`` means already purchased.
    """
    if url is None:
        url = purchaseBase  # module-level endpoint constant
    resp = session.post(url, data={"fid": fid}, timeout=timeout)
    return resp.json()
# Fetch net-disk (pan) info. If the purchase call returns 10602 (already
# purchased) we just reload the page to read the info; otherwise the purchase
# goes through first and we reload afterwards — either way try_puchase is
# called once, then the detail page is refreshed.
# Returns -> list / None
def get_pan_info(fid: str, session):
    """Return the extraction code(s) and pan link(s) for *fid*, or ``None``.

    Reloads the detail page via ``get_detail`` and scrapes every
    ``<span class="copy-content">``; the text of each span (extraction code /
    net-disk link) is collected into a list.

    :param fid: resource id whose detail page is scraped.
    :param session: ``requests.Session``-like object passed to ``get_detail``.
    :return: list of span texts, or ``None`` when no matching span is found
        (callers distinguish "nothing there" from an empty result).
    """
    detailContent = get_detail(fid=fid, session=session)
    soup = BeautifulSoup(detailContent, 'html.parser')
    copy_contents = soup.find_all('span', class_='copy-content')
    # Truthiness check instead of the unidiomatic __len__() dunder call.
    if not copy_contents:
        return None
    return [info.text for info in copy_contents]
# Refresh the detail page for the given fid and return the raw HTML response.
def get_detail(fid: str, session, base: str = None, timeout: float = 15) -> str:
    """Fetch the resource detail page for *fid* and return its HTML text.

    :param fid: resource id appended to the detail URL.
    :param session: ``requests.Session``-like object exposing ``.get()``.
    :param base: site root; defaults to the module-level ``urlBase``.
    :param timeout: seconds before the HTTP request is aborted — requests has
        no default timeout and would otherwise block forever.
    :return: response body as text (HTML of the detail page).
    """
    if base is None:
        base = urlBase  # module-level site root
    detailUrl = base + '/resource/detail/' + fid
    resp = session.get(detailUrl, timeout=timeout)
    return resp.text
if __name__ == '__main__':
    # Resource to buy / look up.
    target_fid = "b01940f0f34e6ca91f68b258362d50f1"
    session.headers = headers
    # Step 1: attempt the purchase; a {"code": 10602} reply means it was
    # already bought, which is also fine.
    purchase_result = try_puchase(fid=target_fid, session=session)
    print(purchase_result)
    # Step 2: reload the detail page and pull out the pan links / codes.
    pan_result = get_pan_info(fid=target_fid, session=session)
    print(pan_result)