import requests
import time
from urllib import parse
from bs4 import BeautifulSoup
from selenium import webdriver
import random
import re

# Configure Chrome: disable image loading (content setting 2 = block) so the
# video-list page renders faster; we only need the HTML, not the thumbnails.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option(
    'prefs', {'profile.default_content_setting_values': {'images': 2}}
)
# Optional tweaks — uncomment as needed:
#chrome_options.add_argument('--headless')
#chrome_options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
#chrome_options.add_argument('--proxy-server=http://192.168.1.20:1081')
#chrome_options.add_argument('--no-sandbox')
driver = webdriver.Chrome(options=chrome_options)
# Use an explicit chromedriver binary instead:
#driver = webdriver.Chrome("./chromedriver", options=chrome_options)

# Load page 3 of the uploader's video list (sorted by publish date) and give
# the JS-rendered list time to appear before grabbing the DOM.
driver.get('https://space.bilibili.com/447261734/video?tid=0&page=3&keyword=&order=pubdate')
time.sleep(5)
soup = BeautifulSoup(driver.page_source, 'lxml')

# BV ids are "BV" followed by exactly 10 alphanumerics; compile once instead
# of re-parsing the pattern on every list item, and use a raw string.
_BV_RE = re.compile(r'BV([0-9A-Za-z]{10})')

def _extract_bv_id(fragment):
    """Return the first BV id (e.g. 'BV1xx411c7XZ') found in the string form
    of *fragment*, or None when the fragment contains no BV id."""
    match = _BV_RE.search(str(fragment))
    return match.group() if match is not None else None

alist = []
for item in soup.body.select('li.small-item[data-aid]'):
    bv = _extract_bv_id(item)
    if bv is None:
        # Original code called .group() unconditionally, which raises
        # AttributeError on a no-match item; skip such items instead.
        continue
    alist.append(bv)
    print(bv)

# Shut the browser down, then persist the collected BV ids one per line so a
# downstream downloader can consume the list.
driver.quit()
with open("alist.txt", "w", encoding="utf-8") as out_file:
    out_file.write("".join(bv.strip() + "\n" for bv in alist))


# Next step (run manually): download every listed video with `yutto alist.txt`