import requests
import csv
import time

class BZhan():
    """Scraper for Bilibili's "popular videos" web API.

    Fetches paginated listings from the public popular endpoint, flattens
    each entry into a Chinese-labelled record, and writes the combined
    result to ``bilibili_videos.csv``.
    """

    # Maps each output label to the key of the ``stat`` dict it is derived
    # from.  NOTE(review): ``stat`` only carries engagement counters (view,
    # coin, favorite, ...), so most of these lookups fall back to 0 — the
    # "partition ratio" figures are placeholders, not real category data.
    _PARTITION_SOURCE_KEYS = {
        '动画': 'view',
        '番剧': 'aid',
        '国创相关': 'bvid',
        '音乐': 'desc',
        '舞蹈': 'pic',
        '游戏': 'owner',
        '科技': 'stat',
        '生活': 'coin',
        '鬼畜': 'favorite',
        '时尚': 'danmaku',
        '广告': 'like',
        '娱乐': 'share',
        '电影': 'reply',
        '电视剧集': 'share',
    }

    def __init__(self):
        # Browser-like headers so the API treats us as a regular web client.
        self.headers = {
            "Origin": "https://www.bilibili.com",
            "Pragma": "no-cache",
            "Referer": "https://www.bilibili.com/v/popular/all/?spm_id_from=333.1007.0.0",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-site",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
        }
        self.url = "https://api.bilibili.com/x/web-interface/popular"

    def get_data(self, page):
        """Fetch one page of the popular list.

        :param page: 1-based page number.
        :return: list of raw video dicts, or ``[]`` on any failure.
        """
        params = {
            "ps": "20",            # page size
            "pn": str(page),       # page number
        }
        try:
            # Explicit timeout: requests otherwise waits indefinitely on a
            # stalled connection.
            response = requests.get(self.url, headers=self.headers,
                                    params=params, timeout=10)
            response.raise_for_status()  # raise on HTTP 4xx/5xx
            res = response.json()
            if 'data' in res and 'list' in res['data']:
                return res['data']['list']
            print(f"Unexpected response format: {res}")
            return []
        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")
            return []

    def parse_data(self, res):
        """Flatten raw API entries into CSV-ready dicts.

        Echoes each parsed record to stdout as a progress indicator.

        :param res: list of raw video dicts from :meth:`get_data`.
        :return: list of flat dicts with Chinese column labels.
        """
        parsed_data = []
        for data in res:
            stat = data['stat']  # hoist the repeated nested lookup
            item = {
                '标题': data['title'],
                '视频分类': data['tname'],
                'aid': data['aid'],
                'bvid': data['bvid'],
                '视频描述': data['desc'],
                '视频封面': data['pic'],
                'up主': data['owner']['name'],
                '投币数': stat['coin'],
                '收藏数': stat['favorite'],
                '弹幕数': stat['danmaku'],
                '点赞数': stat['like'],
                '观看数': stat['view'],
                '分享数': stat['share'],
                '评论数': stat['reply'],
                # Kept for CSV schema compatibility: the API exposes no
                # separate repost/playback counters, so these duplicate
                # share/view.
                '转发数': stat['share'],
                '播放量': stat['view'],
                '视频分区占比': self.calculate_partition_ratio(stat),
            }
            parsed_data.append(item)
            print(item)
        return parsed_data

    def calculate_partition_ratio(self, stat):
        """Return a {label: counter / views} mapping for one video.

        All-zero when the video has no views (avoids division by zero).
        Missing counters default to 0, so labels whose source key is not
        present in ``stat`` always map to 0.

        :param stat: the video's ``stat`` dict; must contain ``'view'``.
        """
        total_views = stat['view']
        if total_views <= 0:
            return dict.fromkeys(self._PARTITION_SOURCE_KEYS, 0)
        return {
            label: stat.get(key, 0) / total_views
            for label, key in self._PARTITION_SOURCE_KEYS.items()
        }

    def save_to_csv(self, data):
        """Write the parsed records to ``bilibili_videos.csv``.

        No-op on empty input — this avoids an IndexError on ``data[0]``
        and does not clobber an existing file with an empty one.

        :param data: list of flat dicts sharing the same keys.
        """
        if not data:
            return
        keys = data[0].keys()  # header row comes from the first record
        with open('bilibili_videos.csv', 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=keys)
            writer.writeheader()
            writer.writerows(data)

    def main(self):
        """Fetch pages 1-5, parse them, and write one combined CSV."""
        all_data = []
        for page in range(1, 6):  # first 5 pages only, to limit requests
            res = self.get_data(page)
            if res:
                all_data.extend(self.parse_data(res))
            time.sleep(1)  # throttle between pages to be polite to the API
        # Write once after collecting everything: the previous per-page
        # save opened the file in 'w' mode each time, so every page
        # overwrote the last and only the final page survived on disk.
        self.save_to_csv(all_data)

if __name__ == '__main__':
    # Script entry point: run the full scrape-and-save pipeline.
    BZhan().main()
