import sys
import time
from pathlib import Path
from queue import Empty, Queue
from threading import Thread

import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.service import Service as EdgeService
from webdriver_manager.microsoft import EdgeChromiumDriverManager


class DynamicSpider(object):
    """Download every image from a Bilibili user's dynamic (feed) page.

    Pipeline: open the page in a headless Edge browser, scroll to the
    bottom so all lazily-loaded posts render, scrape the image URLs,
    download them with a small thread pool into ``./download``, then
    delete files smaller than 10 KiB (placeholders, emotes, avatars).
    """

    def __init__(self, uid: str):
        # NOTE: uid is a single user-id string (the CLI passes one id per
        # spider instance); the previous `list` annotation was wrong.
        self.url = f"https://space.bilibili.com/{uid}/dynamic"
        self.uid = uid
        self.download_thread_n = 0  # number of image URLs found so far
        # Start a headless Edge browser (driver auto-installed).
        print("正在初始化浏览器 ... ...")
        self.options = webdriver.EdgeOptions()
        self.options.add_argument("--headless")
        self.browser = webdriver.Edge(
            service=EdgeService(EdgeChromiumDriverManager().install()),
            options=self.options,
        )
        print("初始化浏览器成功！")
        self.img_urls = Queue()  # URLs shared with the download workers
        # Create the download folder; exist_ok makes repeated runs safe.
        self.path = Path("./download")
        self.path.mkdir(parents=True, exist_ok=True)

    def _scroll_to_bottom(self):
        """Simulate mouse-wheel scrolling until the page stops growing."""
        print("正在滑动到最底部 ... ...")
        while True:
            # Step down until the viewport reaches the current page bottom.
            while self.browser.execute_script("return window.scrollY + window.innerHeight < document.body.scrollHeight"):
                self.browser.execute_script("window.scrollBy(0, 105)")
            # Nudge up and back down: scrolling too fast can reach the
            # bottom without triggering the lazy-load of more posts.
            self.browser.execute_script("window.scrollBy(0, -105)")
            time.sleep(0.1)
            self.browser.execute_script("window.scrollBy(0, 105)")
            time.sleep(1)
            # If no new content was appended during the pause, we are done.
            if self.browser.execute_script("return window.scrollY + window.innerHeight >= document.body.scrollHeight"):
                break
        print("已到达世界的尽头！")

    def _get(self):
        """Open the user's dynamic page in the browser."""
        print(f"正在打开网页 {self.uid} ... ...")
        self.browser.get(self.url)
        print("成功打开网页！")
        time.sleep(1)  # give the initial render a moment to settle

    def _parse(self):
        """Collect image URLs from the rendered page into the queue."""
        print("正在处理网页 ... ...")
        imgs = self.browser.find_elements(By.CLASS_NAME, "bili-awesome-img")
        styles = [img.get_attribute("style") for img in imgs]
        for style in styles:
            # style looks like: background-image: url("//host/path@params");
            # strip the size suffix after '@' and the leading quote.
            self.img_urls.put("https:" + style.split('@')[0].split('"')[-1])
            self.download_thread_n += 1
        print("网页处理完毕！")

    def _save(self):
        """Worker: drain the URL queue, writing each image to disk."""
        while True:
            try:
                # get_nowait() avoids the empty()/get() race of the old
                # code: another worker could consume the last item between
                # the check and the blocking get(), deadlocking this thread.
                url = self.img_urls.get_nowait()
            except Empty:
                break
            try:
                # timeout so a stalled connection cannot hang the worker
                img_byte = requests.get(url=url, timeout=30).content
            except requests.RequestException:
                continue  # best-effort: skip images that fail to download
            with open(self.path / url.split('/')[-1], "wb") as img:
                img.write(img_byte)

    def _download(self):
        """Download all queued URLs with roughly one thread per two images."""
        threads = []
        print("获取 URL 完毕，开始下载 ... ...")
        # max(1, ...) fixes the single-image case: n // 2 would be zero
        # threads and nothing would ever be downloaded.
        for _ in range(max(1, self.download_thread_n // 2)):
            new_thread = Thread(target=self._save)
            new_thread.start()
            threads.append(new_thread)
        for old_thread in threads:
            old_thread.join()
        print("下载完毕！")

    def _filter(self):
        """Delete downloaded files smaller than 10 KiB (junk images)."""
        print("正在筛选文件 ... ...")
        path_download = Path("./download")
        if not path_download.exists():
            return
        for each_img in path_download.iterdir():
            if each_img.stat().st_size < 10240:
                each_img.unlink()
        print("筛选完毕！")

    def run(self):
        """Execute the full scrape/download pipeline.

        The browser is quit in a finally block so a failure in any step
        no longer leaks a headless Edge process.
        """
        try:
            self._get()
            self._scroll_to_bottom()
            self._parse()
            self._download()
            self._filter()
        finally:
            self.browser.quit()


if __name__ == '__main__':
    # Require at least one uid argument; the old code exited silently
    # with a success status, giving the user no hint what went wrong.
    if len(sys.argv) < 2:
        print(f"usage: {sys.argv[0]} uid [uid ...]", file=sys.stderr)
        sys.exit(1)
    # One spider (and one browser session) per requested user id.
    for uid in sys.argv[1:]:
        DynamicSpider(uid).run()
