def check_lib():
    """Ensure the third-party deps (bs4, aiohttp) are importable, installing any
    that are missing from the Tsinghua PyPI mirror.

    Fixes over the original: probes each library individually (so an already
    present one is not reinstalled), and installs via
    ``sys.executable -m pip`` with an argument list instead of building a
    shell string for ``os.system`` — this targets the *running* interpreter
    and avoids shell interpolation entirely.
    """
    import importlib
    import subprocess
    import sys

    libs = ["bs4", "aiohttp"]
    url = r'https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/'  # Tsinghua PyPI mirror

    # Probe each library; collect only the ones that actually fail to import.
    missing = []
    for lib_name in libs:
        try:
            importlib.import_module(lib_name)
        except ModuleNotFoundError:
            missing.append(lib_name)

    if not missing:
        return

    print('Failed SomeHow')
    for lib_name in missing:
        print("Start install {0}".format(lib_name))
        # Argument list + no shell: nothing is interpolated into a shell string.
        subprocess.run(
            [sys.executable, "-m", "pip", "install", lib_name, "-i", url],
            check=False,
        )
        print('{0} install successful'.format(lib_name))
    print('All install successful ')

# Run the dependency check at import time, before the third-party imports below
# (bs4 is imported at module level, so it must exist by then).
check_lib()
import requests
from bs4 import BeautifulSoup
import os


# Bilibili ranking page URL
B_RANK_URL = "https://www.bilibili.com/v/popular/rank/"

# Per-category API path segments appended to the rank URL
B_RANK_APIS = [
    "all", # all categories
    "bangumi", # anime (licensed)
    "guochan", # Chinese animation
    "guochuang", # Chinese original content
    "documentary", # documentaries
    "douga", # animation
    "music", # music
    "dance", # dance
    "game", # gaming
    "knowledge", # knowledge
    "tech", # technology
    "sports", # sports
    "car", # cars
    "life", # lifestyle
    "food", # food
    "animal", # animals
    "kichiku", # kichiku (remix/parody)
    "fashion", # fashion
    "ent", # entertainment
    "cinephile", # film & TV culture
    "movie", # movies
    "tv", # TV series
    "origin", # original content
    "rookie", # newcomers
]


class RankItem:
    """One entry of the Bilibili popularity ranking.

    All fields are plain strings scraped from the page; they default to "".
    """

    def __init__(self):
        self.rank = ""         # position on the chart ("1", "2", ...)
        self.video_name = ""   # video title
        self.video_url = ""    # link to the video page
        self.up_name = ""      # uploader's display name
        self.view_counts = ""  # play count text
        self.like_counts = ""  # danmaku (bullet-comment) count text

    def __str__(self):
        # Render as the tuple of all six fields, in declaration order.
        fields = (self.rank, self.video_name, self.video_url,
                  self.up_name, self.view_counts, self.like_counts)
        return str(fields)

    # repr is intentionally identical to str for easy debugging/printing.
    __repr__ = __str__

def parse_rank_item(li_item:BeautifulSoup) -> RankItem:
    """Build a RankItem from a single <li class="rank-item"> element.

    :param li_item: the <li> tag for one ranked video
    :return: populated RankItem
    """
    item = RankItem()

    # Rank number is carried on the <li> itself as a data attribute.
    item.rank = li_item.attrs['data-rank']

    # Title anchor provides both the display name and the video link.
    title_link = li_item.find(name="a", class_="title")
    item.video_name = title_link.string
    item.video_url = title_link.attrs["href"]

    # The three "data-box" spans are, in order: uploader, views, danmaku count.
    boxes = li_item.find_all(name="span", class_="data-box")
    item.up_name = boxes[0].text.strip()
    item.view_counts = boxes[1].text.strip()
    item.like_counts = boxes[2].text.strip()
    return item

def parse_rank_html(html_str: str, debug=False) -> "list[RankItem]":
    """Parse the full rank-page HTML into a list of RankItem.

    :param html_str: HTML source of the ranking page
    :param debug: when True, print each item as it is parsed (this flag was
        previously dead — its only use was commented out)
    :return: RankItems in page order
    """
    res_rank_items = []
    soup = BeautifulSoup(html_str, "lxml")
    # The page's first (and only) <ul class="rank-list"> holds the entries.
    rank_list = soup.find_all(name="ul", class_="rank-list")[0]
    for rank_item in rank_list.find_all(name="li", class_="rank-item"):
        item = parse_rank_item(rank_item)
        if debug:
            print(item)
        res_rank_items.append(item)

    return res_rank_items

def write_local_data(items: "list[RankItem]", fileName: str):
    """Write one ``str(item)`` per line to *fileName*, UTF-8 encoded.

    :param items: parsed rank entries (anything with a usable str())
    :param fileName: destination path; overwritten if it already exists
    """
    # Binary mode + explicit encode keeps the output bytes identical across
    # platforms (no newline translation).
    with open(fileName, "wb") as f:
        for item in items:
            f.write((str(item) + "\n").encode("utf-8"))

def create_floder(path: str):
    """Create the directory *path* (including parents) if it does not exist.

    Replaces the exists()/mkdir() pair, which raced (another process could
    create the directory between the check and the call) and could not create
    nested paths, with a single atomic-intent ``os.makedirs``.

    :param path: directory to create; no-op if it already exists
    """
    # NOTE(review): "floder" is a typo for "folder"; kept so existing callers
    # outside this view don't break.
    os.makedirs(path, exist_ok=True)

if __name__ == "__main__":
    # Fetch the overall ranking page. timeout prevents an indefinite hang;
    # raise_for_status fails fast instead of parsing an HTTP error page.
    resp = requests.get(B_RANK_URL, timeout=10)
    resp.raise_for_status()
    # The page is UTF-8; set it explicitly so requests doesn't fall back to a
    # guessed encoding and garble the Chinese titles.
    resp.encoding = "utf-8"

    ranks = parse_rank_html(resp.text)
    for each in ranks:
        print(each)

    write_local_data(ranks, "1.txt")


