import csv
import requests
from bs4 import BeautifulSoup
import datetime
# Target: Bilibili "all categories" popularity ranking page.
url = 'https://www.bilibili.com/v/popular/rank/all'
# Fetch the ranking page. A browser-like User-Agent reduces the chance of
# being served a blocked/empty page, and a timeout prevents hanging forever.
response = requests.get(
    url,
    headers={'User-Agent': 'Mozilla/5.0'},
    timeout=10,
)
response.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
response.encoding = 'utf-8'  # page is UTF-8; avoid mojibake from charset auto-detection
html_text = response.text
soup = BeautifulSoup(html_text, 'html.parser')
class Video:
    """One entry of the Bilibili popularity ranking.

    Holds the scraped fields for a single ranked video and knows how to
    serialize itself as one CSV row.
    """

    # Attribute names in CSV column order; drives to_csv().
    _FIELDS = ('rank', 'title', 'score', 'see', 'up', 'up_id', 'url')

    def __init__(self, rank, title, score, see, up, up_id, url):
        """Store the scraped values as plain attributes."""
        self.rank = rank
        self.title = title
        self.score = score
        self.see = see
        self.up = up
        self.up_id = up_id
        self.url = url

    def to_csv(self):
        """Return this video's fields as a list, in CSV column order."""
        return [getattr(self, field) for field in Video._FIELDS]

    @staticmethod
    def csv_title():
        """Return the CSV header row matching the column order of to_csv()."""
        return ['排名', '标题', '分数', '播放量', 'up主', 'upID', 'URL']

# Each ranking entry is an <li class="rank-item">; extract the fields we need.
items = soup.findAll('li', {'class': 'rank-item'})
vidoes = []  # NOTE: name kept as-is ("videos" typo) — the CSV-writing code below reads it
up_prefix = '//space.bilibili.com/'
for itm in items:
    # Hoisted: the title anchor was previously queried twice (title + URL).
    title_link = itm.find('a', {'class': 'title'})
    title = title_link.text.strip()
    url = title_link.get('href')
    # .strip() keeps surrounding whitespace/newlines out of the CSV cells.
    rank = itm.find('div', {'class': 'num'}).text.strip()
    score = itm.find('div', {'class': 'pts'}).find('div').text.strip()
    see = itm.find('span', {'class': 'data-box'}).text.strip()
    # Hoisted: find_all('a') was previously run twice per item.
    anchors = itm.find_all('a')
    up = anchors[2].text.strip()
    # The uploader link looks like //space.bilibili.com/<uid>; keep only the uid.
    up_id = anchors[2].get('href')[len(up_prefix):]
    vidoes.append(Video(rank, title, score, see, up, up_id, url))
# Dump the scraped entries into a timestamped CSV file.
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
out_name = f'top100_{timestamp}.csv'
with open(out_name, 'w', newline='', encoding='utf-8') as out_file:
    writer = csv.writer(out_file)
    writer.writerow(Video.csv_title())
    writer.writerows(video.to_csv() for video in vidoes)
