import requests
import json
import pandas as pd

#构造解析网页数据函数
def url_parser(url):
    '''
    Fetch one page of Bilibili's "popular" API and extract video records.

    Parameters
    ----------
    url : str
        Full API URL, e.g. 'https://api.bilibili.com/x/web-interface/popular?ps=20&pn=1'.

    Returns
    -------
    list[dict]
        One dict per video with keys '网址', '主题', '名称', '作者'.

    Raises
    ------
    Exception
        If the HTTP response status is not 200.
    '''
    # Browser-like headers (including a logged-in session cookie) so the API
    # serves data instead of rejecting the request.
    headers = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cookie': "buvid3=966C5B24-FB70-4BFF-8831-C53E65E292AE148804infoc; i-wanna-go-back=-1; buvid_fp_plain=undefined; nostalgia_conf=-1; CURRENT_BLACKGAP=0; DedeUserID=280761996; DedeUserID__ckMd5=b23eb014ad93fd9d; b_ut=5; LIVE_BUVID=AUTO8716535790436119; blackside_state=0; hit-dyn-v2=1; _uuid=CC1C8B610-1F9E-9DE10-D1108-5E2EDE9333CA19674infoc; b_nut=100; is-2022-channel=1; buvid4=74833781-4FD2-62E8-B2B0-1FC061E9B84887999-022022409-jpYbLRJjQgspAlmMP1mh%2Bg%3D%3D; rpdid=|(YuJm~uuJm0J'uYY)Ym)uYR; hit-new-style-dyn=0; fingerprint=bc4d7d1948f7d5726a9bd30ceb16f58b; buvid_fp=bc4d7d1948f7d5726a9bd30ceb16f58b; CURRENT_QUALITY=112; bp_video_offset_280761996=749519398764544000; b_lsid=A4B48669_185A4D7A9AF; bsource=search_baidu; SESSDATA=c451f6de%2C1689059875%2C0a0c6%2A12; bili_jct=2efb18c565541c12a136b8215c55e209; sid=8i20d4li; CURRENT_FNVAL=4048; PVID=2; innersign=0",
        'origin': 'https://www.bilibili.com',
        'referer': 'https://www.bilibili.com/',
        'sec-ch-ua': '"Not?A_Brand";v="8", "Chromium";v="108", "Microsoft Edge";v="108"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-site',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.76'
    }

    # timeout prevents the script from hanging forever on a stalled connection
    r = requests.get(url, headers=headers, timeout=10)
    if r.status_code != 200:
        # include the status code so failures are diagnosable (was a bare 'error')
        raise Exception(f'request failed: HTTP {r.status_code} for {url}')

    # The endpoint returns JSON; decode it as UTF-8.
    r.encoding = 'utf-8'
    data_json = json.loads(r.text)

    # 'data' -> 'list' holds the per-video records for this page.
    comments_list = data_json['data']['list']

    # NOTE(review): 'pic' is the cover-image URL field, not the video page URL —
    # the '网址' label may be misleading; confirm intent before relying on it.
    return [
        {
            '网址': item['pic'],            # cover image URL
            '主题': item['tname'],          # video category/topic
            '名称': item['title'],          # video title
            '作者': item['owner']['name']   # uploader's username
        }
        for item in comments_list
    ]

# Build the paginated request URLs for the "popular" API
# (discovered via the browser's Fetch/XHR tab): 20 pages of 20 items each.
urls = [
    f'https://api.bilibili.com/x/web-interface/popular?ps=20&pn={page}'
    for page in range(1, 21)
]

# Fetch and parse every page, flattening the per-page record lists
# into a single list of video dicts.
all_datas = []
for page_url in urls:
    all_datas.extend(url_parser(page_url))

#Export every collected record to an Excel workbook.
df=pd.DataFrame(all_datas)
df.to_excel('b站每日热门.xlsx')
