import requests
import json
from urllib.parse import quote
import csv
import time
import hashlib

# There are 8000+ comments in total; pagination ends when is_end is True.
# If the volume is too large, stop the script manually with Ctrl+C to avoid
# overloading the site / violating its terms of use.
class Comment(object):
    """Scraper for top-level comments on a bilibili video.

    Walks the paginated ``x/v2/reply/wbi/main`` API until the server's
    cursor reports ``is_end``, appending every comment to ``output.csv``.
    The thread has 8000+ comments; stop manually with Ctrl+C if you do not
    need them all.
    """

    # Salt appended to the query string before MD5-hashing — this produces
    # the ``w_rid`` signature required by bilibili's wbi anti-scraping
    # scheme (mirrors what the site's frontend JS does).
    WBI_SALT = 'ea1db124af3c7062474693fa704f4ff8'

    def __init__(self):
        # Video id (oid) whose comment section is scraped; used both in the
        # request params and in the signed query string.
        self.oid = '81310636'
        self.url = 'https://api.bilibili.com/x/v2/reply/wbi/main'
        # NOTE(review): cookie/SESSDATA below is a captured login session —
        # it expires; refresh it from the browser when requests start failing.
        self.headers = {
            'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
            'cookie': "buvid3=5D7FF747-6A33-14C8-CED4-A1DF459786E457370infoc; b_nut=1751190157; b_lsid=51E7D7210_197BB113539; _uuid=FB6E873A-7BAC-BEC3-2218-ECD10D3FC2CE556607infoc; buvid4=076AAB67-8660-ABB9-58F7-59F2AD98EA0E57922-025062917-1FX2mk9F0GD0oRcZ%2Bx0MQw%3D%3D; buvid_fp=a75d60290a9a59a581c33718c49d75a9; header_theme_version=CLOSE; enable_web_push=DISABLE; home_feed_column=5; SESSDATA=7e627272%2C1766742182%2C47d3e%2A62CjC9zBZwIC7VwY_YSnuC4tE4_irAYnBLGanHBxO2DcqSz9cxB_5FAcUdHp5QjG7nZ9ESVm5aaVFvNmhSbXp5RXFCckxBV1FyY2ZhMjhHQlJFc1E3UkRuQ3pQcjBYLWdiTlluTGMzNXNmSGZSdTRYR1FtNV9TZ09ybUY2X09PVHZKdUl3VFFNODFRIIEC; bili_jct=1b553a45940d0a8f9f23b9e4a7b9128b; DedeUserID=490105042; DedeUserID__ckMd5=064ef4c354350448; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NTE0NDkzOTMsImlhdCI6MTc1MTE5MDEzMywicGx0IjotMX0.A9aYdaUxEbP4WMe4rKpuralPgDVlPoi1jUlrXhvr4QY; bili_ticket_expires=1751449333; rpdid=|(Jll|J~RmmJ0J'u~luum|k||; sid=89gkqr7f; browser_resolution=1707-791; bp_t_offset_490105042=1083845786103447552; CURRENT_FNVAL=4048"
        }
        # Params for the very first page, captured from a real browser
        # request (w_rid/wts are the pre-computed signature for that exact
        # query); later pages are signed freshly in nextpage_params().
        self.params_page1 = {
            'oid':'81310636',
            'type':'1',
            'mode':'2',
            'pagination_str':'{"offset":""}',
            'plat':'1',
            'seek_rpid':'',
            'web_location':'1315875',
            'w_rid':'e809c87717a82ed0044af574cee8150c',
            'wts':'1751190961'
            }
        self.first_page = True
        self.params_nextpage = {}

    def request_response(self):
        """Fetch every comment page, saving each one as it arrives.

        Iterates instead of recursing: the original recursive version would
        exceed Python's default recursion limit (~1000 frames) long before
        exhausting 8000+ comments. Stops when the API cursor reports
        ``is_end`` (or when the response is malformed).
        """
        while True:
            # First request uses the captured pre-signed params; every
            # later request uses freshly signed pagination params.
            params = self.params_page1 if self.first_page else self.params_nextpage
            resp = requests.get(self.url, headers=self.headers,
                                params=params, timeout=10)
            resp.encoding = 'utf-8'
            json_data = resp.json()
            self.params_nextpage = self.nextpage_params(json_data)
            self.first_page = False
            self.save_comment(json_data)
            print('------------下一次请求等待1秒--------------')
            time.sleep(1)  # be polite: at most one request per second
            cursor = (json_data.get('data') or {}).get('cursor') or {}
            # Default to True so a malformed/error response terminates the
            # loop instead of spinning forever.
            if cursor.get('is_end', True):
                print('已经获取完全部数据')
                break

    def nextpage_params(self, json_data):
        """Build the signed query params for the next comment page.

        Extracts ``next_offset`` from *json_data* and computes the
        ``w_rid`` signature: MD5 over the alphabetically ordered query
        string concatenated with the wbi salt.

        Returns the params dict ready to pass to ``requests.get``.
        """
        offset = json_data.get('data').get('cursor') \
                          .get('pagination_reply')['next_offset']
        # Compact separators match the frontend's JSON serialization — any
        # extra whitespace would invalidate the signature.
        json_str = json.dumps({'offset': offset}, separators=(',', ':'))
        wts_time = int(time.time())
        # Keys must be in alphabetical order and the value URL-encoded,
        # exactly as the site signs them.
        query = '&'.join([
            'mode=2',
            f'oid={self.oid}',
            f'pagination_str={quote(json_str)}',
            'plat=1',
            'type=1',
            'web_location=1315875',
            f'wts={wts_time}',
        ])
        w_rid = hashlib.md5((query + self.WBI_SALT).encode('utf-8')).hexdigest()
        return {
            'oid': self.oid,
            'type': '1',
            'mode': '2',
            'pagination_str': json_str,
            'plat': '1',
            'web_location': '1315875',
            'w_rid': w_rid,
            'wts': wts_time,
        }

    def save_comment(self, json_data):
        """Append one page's comments to ``output.csv``.

        Tolerates a malformed page (missing ``data``/``replies``) by
        writing nothing, and writes a header row when the file is new.
        """
        replies = (json_data.get('data') or {}).get('replies') or []
        fieldnames = ['name', 'addr', 'comment', 'reply_comment', 'reply_url']
        with open('output.csv', 'a', encoding='utf-8', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            # In append mode the position starts at the file's end, so
            # tell() == 0 means the file is new/empty and needs a header.
            if f.tell() == 0:
                writer.writeheader()
            for reply in replies:
                content = reply.get('content') or {}
                control = reply.get('reply_control') or {}
                reply_id = reply['rpid_str']
                writer.writerow({
                    'name': (reply.get('member') or {}).get('uname', ''),
                    'addr': control.get('location', ''),
                    'comment': content.get('message', ''),
                    'reply_comment': content.get('sub_reply_entry_text', ''),
                    # URL of the sub-reply feed for this comment thread.
                    'reply_url': f'https://api.bilibili.com/x/v2/reply/reply?oid={self.oid}&type=1&root={reply_id}&ps=10&pn=1&web_location=333.788',
                })

if __name__ == "__main__":
    comment = Comment()
    comment.request_response()
