'''
Author: fg
Date: 2022-12-05 11:00:11
LastEditors: fg
LastEditTime: 2022-12-21 11:31:27
'''
import requests, json
import pandas as pd
import time
import re
# Keyword/content to search for

# The following strips site-specific markup we don't want, based on the page's HTML
def resub(item):
    """Strip site-specific markup from a search-result title string.

    Removes injected <span> lines, the keyword-highlight <em> tags, and
    decodes HTML-escaped double quotes. Returns the cleaned string.
    """
    # Drop whole lines the site injects that begin with "<span>".
    cleaned = re.sub(r'^<span>.*\n?', '', item, flags=re.MULTILINE)
    # Remove highlight tags around the search keyword and decode &quot;.
    for pattern, replacement in (
        ('<em class="keyword">', ''),
        ('</em>', ''),
        ('&quot;', '"'),
    ):
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned

def getdata(start):
    """Fetch one page of Bilibili video search results for the hard-coded
    keyword and print the entries whose combined text contains '车'.

    start: 1-based page number of the search results to request.

    Side effects: performs an HTTP GET against api.bilibili.com and prints
    to stdout. Returns None.
    """
    name = '蛋仔派对bug'  # search keyword

    url = 'https://api.bilibili.com/x/web-interface/search/type?__refresh__=true&_extra=&context=&page_size=42&order=pubdate&from_source=&from_spmid=333.337&platform=pc&highlight=1&single_column=0&keyword='+name+'&qv_id=1YTMxLrVB6M7BNX1RQu4cHCDgMqRen6G&ad_resource=5654&source_tag=3&category_id=&search_type=video&dynamic_offset=0'# 去掉page
    headers = {
        'Host': 'api.bilibili.com',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
        'Cookie': "buvid3=1C98325A-73D4-4731-ABD1-D4F2291A4F1213450infoc; LIVE_BUVID=AUTO8716214967621574; CURRENT_BLACKGAP=0; blackside_state=0; i-wanna-go-back=-1; buvid4=A82954A0-4AF8-3A51-4C89-6FC0C292FCA790031-022022417-KIRa8eWo1jmWUVyFD37XXg==; CURRENT_QUALITY=80; nostalgia_conf=-1; _uuid=6EAAACC7-510EC-6A310-9B5E-48DB26D4361428378infoc; buvid_fp_plain=undefined; DedeUserID=105042078; DedeUserID__ckMd5=172994c6c2c5038a; b_ut=5; hit-dyn-v2=1; b_nut=100; CURRENT_FNVAL=4048; fingerprint=7eb531af7ac05e932186c95f692c1504; hit-new-style-dyn=0; rpdid=|(umRR)~YmRu0J'uYYmY|kRkl; share_source_origin=COPY; bsource=share_source_copy_link; bp_video_offset_105042078=733855266470101000; buvid_fp=2cf6b4b771dd5c00e7a20a1aeddd2db3; innersign=0; b_lsid=EF57D710A_184E03C335C; SESSDATA=94d83e90,1685761354,7f7a4*c2; bili_jct=f1eebcff42dff3a141d17f1dd577905c; sid=n8fhqf6t; PVID=1"
    }

    title_list, like_list, play_list = [], [], []
    # BUG FIX: a GET request body (`data=`) is ignored by the server, so the
    # page number was never actually sent and every call fetched page 1.
    # Query-string parameters (`params=`) are the correct channel.
    params = {
        'page': start
    }
    # Fetch and decode the API response as UTF-8 text.
    res = requests.get(url, headers=headers, params=params).content.decode('utf-8')

    # Parse the JSON payload.
    jsonfile = json.loads(res)
    # Adapt the field extraction below to your own needs.
    if jsonfile['data']:
        for content in jsonfile['data']['result']:
            title_list.append(resub(content['title']))  # cleaned title
            like_list.append(content['arcurl'])         # video link
            # Convert the unix pubdate timestamp to a readable local time.
            play_list.append(time.strftime(
                "%Y-%m-%d %H:%M:%S", time.localtime(content['pubdate'])))

        # Progress indicator for the page just scraped.
        print('page' + str(start))

        # BUG FIX: a page may contain fewer than 20 results; the old
        # hard-coded range(20) raised IndexError in that case. Pair up
        # exactly what was collected instead.
        ship_list = [pub + title + link
                     for pub, title, link in zip(play_list, title_list, like_list)]

        # Keep only the entries mentioning '车'.
        matches = [entry for entry in ship_list if '车' in entry]
        print("\r\n".join(str(n) for n in matches))
        
  
        

# Start scraping from the first results page.
getdata(1)

""" 
Data = {
    '标题': title_list,
    '链接': like_list,
    '发布时间': play_list
}

#写入excel
dataframe = pd.DataFrame(data=Data)
print(dataframe) 
dataframe.to_excel('./数据2.xlsx', index=False, encoding='utf-8')"""
#最新n个视频


    
    

print("end")


