import requests  # 发送请求
import time  # 获取时间
import os
import pandas as pd  # 保存csv数据
import re  # 数据清洗

# Bilibili web-search endpoint (mixed "all" search, WBI-signed variant).
url = 'https://api.bilibili.com/x/web-interface/wbi/search/all/v2'

# Search keyword: the TV series "《天道》".
Keyword = '《天道》'

# Query-string parameters captured from a browser session.
# NOTE(review): 'w_rid'/'wts' look like a WBI request signature — presumably
# session-bound and may expire; confirm against a fresh capture if requests fail.
params = {
    '__refresh__': 'true',
    '_extra': '',
    'context': '',
    'page': 1,  # overwritten on every iteration of the paging loop below
    'page_size': '42',
    'order': '',
    'duration': '',
    'from_source': '',
    'from_spmid': '333.337',
    'platform': 'pc',
    'highlight': '1',
    'single_column': '0',
    'keyword': Keyword,
    'qv_id': 'URSLzYhDYlzKhRSokUuA8ziGPt28O543',
    'ad_resource': '5646',
    'source_tag': '3',
    'web_location': '1430654',
    'w_rid': '78a95b4feb3dceafc6a73c12a944cc7d',
    'wts': '1687619301',
}
# Request headers copied from a real Chrome-on-macOS session so the request
# resembles normal browser traffic.
# NOTE(review): the cookie is session-specific and will eventually expire.
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 ',
    'accept': 'application/json, text/plain, */*',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7',
    'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-site',
    'cookie': "buvid3=30DCD633-B477-E874-AA05-5DA6ABB2F6A021064infoc; i-wanna-go-back=-1; b_ut=7; _uuid=7101039A1F-A1A7-1A68-B197-DA6D7952435319141infoc; FEED_LIVE_VERSION=V8; buvid_fp=b893835b346bb2df28cf6d59617ae77d; home_feed_column=5; b_nut=1686034019; nostalgia_conf=-1; CURRENT_FNVAL=4048; rpdid=|(YYl~RmRYl0J'uY)Y|JR|YR; browser_resolution=1440-732; innersign=0; b_lsid=6F7BB310F_188EDEE3985; header_theme_version=CLOSE; buvid4=3F492537-BAC7-E72F-9EC2-47AA611EC98F21988-023060614-yW9JSiNaa2opqspXhzGdUg%3D%3D; sid=6yzew98r; PVID=1",
}
all_data = []

# Strips the HTML highlight tags (e.g. <em class="keyword">...</em>) that the
# search API embeds in titles/descriptions. Compiled once here instead of being
# re-compiled twice per record inside the inner loop, as the original did.
_TAG_RE = re.compile(r'<[^>]+>', re.S)

# Fetch up to 50 result pages, clean each record, and keep only entries that
# look related to the show.
for i in range(1, 51):
    params['page'] = i
    r = requests.get(url, headers=headers, params=params)
    time.sleep(0.2)  # pause 0.2s between requests to avoid hammering the API
    if r.status_code == 200:
        # result[-1] is the video section of the mixed "all" search response.
        # NOTE(review): assumes data.result is present with video entries last;
        # a blocked or malformed response would raise KeyError/IndexError here.
        data_rawlist = r.json()['data']['result'][-1]['data']
        # Clean each raw record down to the fields we keep.
        data_list = []
        for data in data_rawlist:
            content = {
                'title': _TAG_RE.sub('', data['title']),
                'play': data['play'],
                'tag': data['tag'],
                'pubdate': data['pubdate'],
                'duration': data['duration'],
                'rank_score': data['rank_score'],
                'favorites': data['favorites'],
                'description': _TAG_RE.sub('', data['description']),
                'author': data['author'],
                'url': data['arcurl'],
                'pic': data['pic'],
            }
            # Keep records tagged with the show / "TV series" / the lead actor,
            # then drop similarly-titled unrelated videos and anime content.
            if '天道' in content['tag'] or '电视剧' in content['tag'] or '王志文' in content['tag']:
                if '不公' in content['title'] or '酬勤' in content['title'] or '台八' in content['title']:
                    continue
                if '火影' not in content['tag'] and '动画' not in content['tag'] and '漫' not in content['tag']:
                    data_list.append(content)

        all_data.extend(data_list)
        print('第' + str(i) + '页数据获取成功')
    else:
        print('第' + str(i) + '页数据获取失败')
        break
print(r.status_code)  # show the status code of the last response
import json  # stdlib; only needed for this final dump

# Persist the scraped records as JSON (non-ASCII kept readable).
# The original passed a bare open(...) to json.dump, leaking the file handle;
# a context manager guarantees the file is flushed and closed. Also make sure
# the output directory exists so the open() cannot fail on a fresh checkout.
os.makedirs('src/data', exist_ok=True)
with open('src/data/bilidata.json', 'w', encoding='utf-8') as f:
    json.dump(all_data, f, ensure_ascii=False)
