import os
import random
import time
from urllib.parse import quote

import requests
from bs4 import BeautifulSoup

'''
Download Bilibili keyword-search results: scrape the search pages for video
URLs, then (optionally) download the videos with you-get.
'''
# Pool of browser User-Agent strings; one is chosen at random per run.
user_agent = [
 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203'
]

# Base request headers shared by every HTTP call in this module.
hd = {'User-Agent': random.choice(user_agent)}
# Raw Cookie header string captured from a logged-in browser session (contains
# SESSDATA etc. for authenticated search results).
# NOTE(review): this is a single header *string*, not a dict -- requests'
# ``cookies=`` parameter expects a dict/CookieJar, so it cannot be passed there
# as-is; it must be sent as a literal ``Cookie`` header.
cookies="buvid3=4882AE2F-547F-76CC-CAB7-3886677DB1D088595infoc; b_nut=1689943188; i-wanna-go-back=-1; b_ut=7; _uuid=8A104D998-C4DD-9D5A-D1081-D9ECE10B5356189267infoc; FEED_LIVE_VERSION=V8; home_feed_column=5; browser_resolution=1865-937; buvid_fp=6cc5362bcebe6d68f1581c2362e4ead5; buvid4=036F0D59-B815-8C90-45CE-A3ADB355E0CB89930-023072120-vdBjAnaBZv4hXXyGJpmiiQ%3D%3D; CURRENT_BLACKGAP=0; rpdid=|(J|YJuYl|Y|0J'uY)mmRJk|R; header_theme_version=CLOSE; bili_ticket=eyJhbGciOiJFUzM4NCIsImtpZCI6ImVjMDIiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2OTIzNDQ1MzYsImlhdCI6MTY5MjA4NTMzNiwicGx0IjotMX0.fGn6dEszGvas1V2WgR5U7T21brNbzCwPKXY8cKb7vzxu1PPQIYOMs9ZTMDyjNIsMNAS2R94_XmyTtg2iaeHiCb1cDjgEpI2Cezlh3UcZ4z0R1kzhi2RAKqOXIgJa7wTv; bili_ticket_expires=1692344536; CURRENT_FNVAL=16; SESSDATA=eea37860%2C1707639736%2C0bca6%2A820eKSI1Z0k_ESyRk7IgG3275R4qwMP8L1eHM_sLmRskOhf9-YfQsnP8o9Tooca_eImQJ3bQAATQA; bili_jct=57f8a6f5c83960faa0893898274d0cd2; DedeUserID=263241977; DedeUserID__ckMd5=108d3bf46fa412d8; sid=6bvljgi3; bp_video_offset_263241977=829977089652293683; b_lsid=E1017442A_189F8516D1D; PVID=1"

# Fetch a page and return its HTML text.
def geturlinfo(url):
    """Fetch *url* and return the response body as text, or '' on failure.

    Sends the module-level User-Agent plus the captured login Cookie string.
    Callers treat '' as "page unavailable" and carry on (best-effort scrape).
    """
    try:
        # BUG FIX: ``cookies=`` expects a dict/CookieJar; the module-level
        # ``cookies`` is a raw header string, so passing it there raised and
        # the old bare except silently returned '' on every call.  Send the
        # string as a literal Cookie header instead.
        r = requests.get(url, timeout=30, headers={**hd, 'Cookie': cookies})
        r.raise_for_status()
        # Use the body-sniffed encoding so Chinese text decodes correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare ``except:`` -- only swallow HTTP/network errors.
        return ''


# Parameter *url* is the first-page search address, e.g.:
# https://search.bilibili.com/all?keyword=beautyleg&from_source=webtop_search&spm_id_from=333.851&order=pubdate&page=2&o=36
# https://search.bilibili.com/all?keyword=%E5%BE%90%E9%9B%85&from_source=nav_suggest_new
# https://search.bilibili.com/all?keyword=%E5%BE%90%E9%9B%85&from_source=nav_suggest_new&page=2
# https://search.bilibili.com/all?keyword=%E5%B0%8F%E6%B7%B1%E6%B7%B1&from_source=nav_search_new&order=totalrank&duration=1&tids_1=129
def getvideourl(url, urlfilepath, page_count):
    """Scrape *page_count* search-result pages starting at *url*.

    Every anchor href (query string stripped, leading '//' removed) is
    appended to *urlfilepath*, one per line.  Errors on a page are logged
    and that page is skipped.
    """
    for page in range(1, page_count + 1):
        page_url = url
        if page > 1:
            # bilibili paginates with &page=N&o=offset, 36 results per page.
            page_url = url + "&page=%s&o=%s" % (page, (page - 1) * 36)
        print(page_url)
        print("page:" + str(page))
        try:
            soup = BeautifulSoup(geturlinfo(page_url), "html.parser")
            # Open the output file once per page, not once per link.
            with open(urlfilepath, "a+", encoding="utf-8") as urltxt:
                for tag in soup.find_all("a"):
                    # .get() skips href-less anchors instead of raising
                    # KeyError and aborting the rest of the page.
                    href = tag.attrs.get("href")
                    if not href:
                        continue
                    video_url = href.split("?")[0].replace("//", "")
                    print(video_url)
                    urltxt.write(video_url + "\n")
        except Exception as e:
            # BUG FIX: the old ``while``-loop incremented ``page`` only on the
            # success path, so a persistent parse error before the increment
            # spun forever on the same page.  The for-loop guarantees progress.
            print(e)


def downloadvideo(url, filename, dir_name):
    """Download every video URL listed in the file *url*.

    Videos are saved under ``dir_name + filename`` via the you-get.exe that
    sits next to this script; each successful attempt is appended with a
    timestamp to ``url + "log"``.  Failures are logged and skipped.
    """
    # makedirs creates dir_name and the per-keyword subfolder in one call
    # (the old code needed two os.mkdir steps).
    target_dir = dir_name + filename
    os.makedirs(target_dir, exist_ok=True)
    here = os.path.dirname(os.path.abspath(__file__))
    # ``with`` closes the URL list -- the old ``open(url)`` handle leaked.
    with open(url, encoding="utf-8") as urls:
        for video_url in urls:
            try:
                print(video_url)
                # Requires a prior manual browser login (cookie above).
                # NOTE(review): shell string built by interpolation; the URL
                # comes from our own scrape, but subprocess.run with an arg
                # list would be safer against odd characters.
                os.system('%s/you-get.exe -fao %s %s' % (here, target_dir, video_url.split("\n")[0]+'/?vd_source=57f8a6f5c83960faa0893898274d0cd2'))
                with open(url + "log", "a+", encoding="utf-8") as vv:
                    vv.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                    vv.write("   " + video_url + "\n")
                print("download success")
                print("=================================================================")
            except Exception as e:
                # Narrowed from a bare except; log instead of hiding the cause.
                print(e)
                continue

# https://www.bilibili.com/video/BV1G4411c7N4?p=1
# asmr中国主播
# 喵老师、小女巫露娜、日南小姐姐、柳婉音
# 轩子2巨兔、可爱的埋埋、丸子君、菟菟萌酱、贝拉小姐姐、灵性饱满的欢欢、丧妹
# 麻醉猫、芝麻对辣、是幼情呀、神楽坂真冬、恩七不甜
# 步非烟、雪儿圆圆、蜜奈儿、Aki秋水、渔子溪、尤优baby、二呆酱、小太阳贼大
# 1月7号
# https://search.bilibili.com/all?keyword=%E5%B0%8F%E6%B7%B1%E6%B7%B1&from_source=nav_search_new&order=totalrank&duration=1&tids_1=129
if __name__ == "__main__":
    download_geturlfile = "1"  # 1 仅获取视频路径保存至文件 2 仅下载视频 3 获取视频路径保存至文件并下载视频
    serch_name = "徐雅"
    serch_name_u = quote(serch_name, encoding="utf-8")
    print(serch_name_u)
    # url = "https://search.bilibili.com/all?keyword=" + serch_name_u + "&from_source=nav_suggest_new"
    # url = "https://search.bilibili.com/all?keyword=" + serch_name_u + "&from_source=nav_suggest_new&order=pubdate&duration=0&tids_1=129"
    # url = "https://search.bilibili.com/all?keyword=" + serch_name_u + "&from_source=nav_search_new&order=totalrank&duration=1&tids_1=129"
    url = "https://search.bilibili.com/all?keyword=%s&from_source=webtop_search&spm_id_from=333.851&order=pubdate" % serch_name_u
    print(url)
    urlfilepath = "E:/bilibilivideobypy/"+serch_name+"url.txt"
    dir_name = 'D:/bilibilivideobypy/'  # 设置文件夹的名字
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    if "1" == download_geturlfile or "3" == download_geturlfile:
        if os.path.exists(urlfilepath):
            # os.rename(urlfilepath, urlfilepath + '.bak')
            os.remove(urlfilepath)
        getvideourl(url, urlfilepath, 50)
    if "2" == download_geturlfile or "3" == download_geturlfile:
        downloadvideo(urlfilepath, serch_name, dir_name)
