from openpyxl import Workbook, load_workbook
from openpyxl.styles import *
from pathlib import Path
import requests
import json
import time
import datetime
import warnings
warnings.filterwarnings('ignore')


# Browser User-Agent sent with every API request (bare requests UAs are
# commonly rejected by the endpoint — presumably why it is spoofed here).
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
}

# JSON fields extracted from each video item, in output-column order.
# "url" is synthetic: it is built from the item's "bvid" (see convert_to_info).
key_list = ['aid', 'author', 'bvid', 'url', 'comment',
            'copyright', 'created', 'description', 'hide_click', 'is_live_playback',
            'is_pay', 'is_steins_gate', 'is_union_video', 'length', 'mid',
            'pic', 'play', 'review', 'subtitle', 'title',
            'typeid', 'video_review']
# Fields coerced to str before writing — presumably to stop Excel from
# reformatting large numeric ids/timestamps; TODO confirm.
str_key_list = ['aid', 'created', 'mid']
# Chinese column titles aligned position-by-position with key_list
# (empty strings where no title was chosen).
# NOTE(review): not referenced anywhere in this file — write_to_excel writes
# key_list as the header row instead; confirm which header was intended.
title_list = ["av号", "up主", "bv号", "视频链接", "评论",
              "版权", "创建时间", "简介", "", "",
              "", "", "是否联合投稿", "视频长度", "up主id",
              "视频封面链接", "播放数", "", "子标题", "标题", "类型id", ""]


def get_total_page(uid):
    """Return how many 30-item pages the user's video list spans.

    Args:
        uid: Bilibili user id (numeric string or int).

    Returns:
        int: number of pages (ceil(count / ps)); 0 if the HTTP request fails,
        so callers iterating ``range(1, total + 1)`` simply do nothing.
    """
    url = "https://api.bilibili.com/x/space/arc/search?mid=%s&ps=30&tid=0&pn=%s&keyword=&order=pubdate&jsonp=jsonp" % (
        uid, 1)
    resp = requests.get(url, headers=headers)
    resp.encoding = "utf-8"
    if not resp.ok:
        # Original fell through and returned None, which crashed range() later.
        return 0
    page_info = json.loads(resp.text)['data']['page']
    count = int(page_info['count'])
    ps = int(page_info['ps'])
    # Ceiling division. The original returned a float (count / ps) on exact
    # multiples, which would break range(1, total_page + 1) in get_info.
    return (count + ps - 1) // ps


def convert_to_info(item):
    """Flatten one raw video dict into a row ordered by ``key_list``.

    The synthetic "url" column is built from the item's "bvid"; fields in
    ``str_key_list`` are coerced to str; everything else is copied as-is.
    """
    def _cell(field):
        # Resolve a single output column for this item.
        if field == "url":
            return "https://www.bilibili.com/video/%s" % item['bvid']
        if field in str_key_list:
            return str(item[field])
        return item[field]

    return [_cell(field) for field in key_list]


def get_info(uid, total_page, wait_time=1):
    """Fetch every page of the user's video list and return the flattened rows.

    Args:
        uid: Bilibili user id.
        total_page: number of pages to request (see get_total_page).
        wait_time: seconds slept after each request to throttle the scraper.

    Returns:
        list of rows, one per video, each produced by convert_to_info.
        Pages whose HTTP request fails are silently skipped (best effort).
    """
    rows = []
    for page_no in range(1, total_page + 1):
        print("current page is %s,total page is %s" % (page_no, total_page))
        page_url = "https://api.bilibili.com/x/space/arc/search?mid=%s&ps=30&tid=0&pn=%s&keyword=&order=pubdate&jsonp=jsonp" % (
            uid, page_no)
        resp = requests.get(page_url, headers=headers)
        resp.encoding = "utf-8"
        if resp.ok:
            videos = json.loads(resp.text)['data']['list']['vlist']
            rows.extend(convert_to_info(video) for video in videos)
        # Throttle even after a failed page, matching the original pacing.
        time.sleep(wait_time)
    return rows


def write_to_excel(info_list, save_path, uid):
    """Write scraped rows to ``<save_path>/<uid>_<timestamp>.xlsx``.

    Args:
        info_list: rows as produced by get_info / convert_to_info.
        save_path: directory to save into; created (with parents) if missing.
        uid: user id, used in the output file name.
    """
    time_stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    file_name = "%s_%s.xlsx" % (uid, time_stamp)
    path = Path(save_path)
    # exist_ok=True replaces the original exists()-then-mkdir check, which was
    # both unidiomatic (== False) and racy.
    path.mkdir(parents=True, exist_ok=True)
    wb = Workbook()
    ws = wb.active
    # NOTE(review): the header row uses the raw JSON field names; the Chinese
    # title_list defined at module level is never used — confirm which header
    # the author intended.
    ws.append(key_list)
    for info in info_list:
        ws.append(info)
    wb.save(str(path.joinpath(file_name)))


if __name__ == "__main__":
    # Scrape the full upload list of one uploader and dump it to Excel.
    target_uid = "37663924"
    output_dir = r"D:\\"
    delay_seconds = 3
    pages = get_total_page(target_uid)
    print("total page is %s" % pages)
    rows = get_info(target_uid, pages, delay_seconds)
    write_to_excel(rows, output_dir, target_uid)
