import requests
import time
import random
from bs4 import BeautifulSoup
import csv

# Bilibili search endpoint; per-request query parameters are supplied in get_one_page().
base_url = 'https://search.bilibili.com/all'
# Headers sent with every request. NOTE(review): the Cookie embeds personal session
# credentials (SESSDATA, bili_jct, DedeUserID) — these expire over time and should
# not be committed to version control; move them to an environment variable/config.
header_value = {
    'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36 Edg/100.0.1185.39',
    'Cookie': '_uuid=FB8DD6710-D5C2-275A-1852-34F33D5C7C7820011infoc; buvid3=70554B67-D7C7-4403-9B5D-BEE4CFB905C0167630infoc; b_nut=1642912120; i-wanna-go-back=-1; fingerprint=92b95a985433b4df5aa7b743d8450491; buvid_fp_plain=undefined; SESSDATA=92706e68,1658464142,26a29*11; bili_jct=6456556b21bf9099f8c0e2fc32c91b20; DedeUserID=81237103; DedeUserID__ckMd5=7f83375153a1959c; sid=5eyas80i; b_ut=5; blackside_state=0; rpdid=0zbfVGotEz|Oxx3boiS|3aj|3w1Nbuv3; LIVE_BUVID=AUTO4716429125478693; buvid_fp=118eca0859a2881bd334bdb48f6b29f3; buvid4=B4065D4C-1E4A-C0B1-D271-9C74F8D36BA458877-022012520-6F+UqQZCDCgroW8AuUKz8w==; CURRENT_BLACKGAP=0; CURRENT_QUALITY=80; bp_t_offset_81237103=639259767014948873; PVID=1; bp_video_offset_81237103=646985167658811400; hit-dyn-v2=1; CURRENT_FNVAL=4048; b_lsid=9817EC2D_1803775E187',

}
# Candidate HTTP proxy pool. NOTE(review): not referenced anywhere in this file —
# requests are made without a proxy; either wire these into requests.get(proxies=...)
# or remove the list.
proxies = [
    {'http': 'http://121.232.148.167:9000'},
    {'http': 'http://39.105.28.28:8118'},
    {'http': 'http://113.195.18.133:9999'},
]


def parse_content(content):
    """Parse one Bilibili search-result HTML page into a list of video dicts.

    Parameters
    ----------
    content : str
        Raw HTML of a search-result page.

    Returns
    -------
    list[dict]
        One dict per video card with Chinese-keyed fields: title, duration,
        view count, upload time, uploader name, and an absolute video link.
    """
    items = []
    soup = BeautifulSoup(content, 'lxml')
    for video in soup.select('.video-list > li'):
        # Query each sub-element once (the original queried '.info a' twice and
        # '.tags span' three times per card).
        link_tag = video.select_one('.info a')
        time_tag = video.select_one('.so-imgTag_rb')
        # Span order per card: [0]=view count, [2]=upload time, [3]=uploader.
        # Index [1] (presumably the danmaku count) is intentionally skipped,
        # matching the original field selection.
        tags = video.select('.tags span')
        # Skip malformed cards (ads/layout variants) instead of crashing with
        # AttributeError/IndexError, which previously aborted the whole page.
        if link_tag is None or time_tag is None or len(tags) < 4:
            continue
        item = {
            '视频标题': link_tag.text.strip(),
            '视频时长': time_tag.text.strip(),
            '观看次数': tags[0].text.strip(),
            '上传时间': tags[2].text.strip(),
            'UP主': tags[3].text.strip(),
            # hrefs are protocol-relative ("//www.bilibili.com/...") — prepend a scheme.
            '视频链接': 'http:' + link_tag.attrs['href'],
        }
        items.append(item)
    return items



def get_one_page(kw, page):
    """Fetch and parse one page of Bilibili search results for keyword *kw*.

    Parameters
    ----------
    kw : str
        Search keyword.
    page : int
        1-based result-page number.

    Returns
    -------
    list[dict]
        Parsed video items from parse_content(); an empty list when the
        request fails or the server does not answer with HTTP 200.
    """
    params = {
        'keyword': kw,
        'page': str(page),
        'from_source': 'web_search',
    }
    # Fixed: `items` was previously unbound on the failure paths (request raised,
    # or status != 200), making the final `return items` raise UnboundLocalError.
    items = []
    try:
        # timeout prevents a stalled connection from hanging the crawl forever
        r = requests.get(base_url, headers=header_value, params=params, timeout=10)
    # Fixed: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # catch only network/HTTP errors.
    except requests.RequestException:
        print('请求失败')
    else:
        if r.status_code == 200:
            items = parse_content(r.text)
        # Randomized 2–6 s delay between requests to avoid anti-crawl throttling.
        sleep_time = random.randint(2, 5) + random.random()
        time.sleep(sleep_time)

    return items


if __name__ == '__main__':
    keyword = input('请输入搜索关键字：')

    # Write all scraped pages to a CSV file. Fixed: the original fetched every
    # page but discarded the results, and the top-of-file `csv` import was unused.
    fieldnames = ['视频标题', '视频时长', '观看次数', '上传时间', 'UP主', '视频链接']
    # utf-8-sig so Excel detects the encoding; newline='' as required by the csv module.
    with open(keyword + '.csv', 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for i in range(1, 51):
            items = get_one_page(keyword, i)
            writer.writerows(items)