import json
import time
import random
from typing import Dict, Set

from utils.downloader import HTMLDownloader
from utils.manager import URLManager
from utils.parser import HTMLParser


class Spider:
    """
    Crawler main program.

    Pulls URLs from the URL manager, downloads and parses each page, feeds
    newly discovered links back into the manager, and appends extracted
    records to a JSON-lines file (one JSON object per line).
    """
    # JSON-lines output file; each line is an object with at least a "url" key.
    FILE_PATH = "data/person.jsonl"

    def __init__(self) -> None:
        self._manager = URLManager()
        self._parser = HTMLParser()
        self._downloader = HTMLDownloader()
        # URLs already persisted in FILE_PATH, so records are not saved twice
        # across runs.
        self._downloaded_urls = self._get_downloaded_urls()

    def execute(self, init_url: str, target: int = 100000) -> None:
        """Start crawling from ``init_url`` until ``target`` pages are processed."""
        self._execute(init_url, target)

    def _execute(self, init_url: str, target: int) -> None:
        """
        Crawl scheduling loop.

        :param init_url: seed URL added to the manager before the loop starts
        :param target: stop after this many pages have been processed
        """
        self._display("开始爬虫")
        counter = 1

        # Seed the URL manager with the initial URL.
        self._manager.add_url(init_url)
        # Keep going as long as the manager has unvisited URLs.
        while self._manager.has_url():
            url = self._manager.get_url()
            self._display("第 {} 个:".format(counter), "*", 5)
            print(url)

            # Skip URLs crawled in a previous run.  The very first URL
            # (counter == 1) is always re-fetched so the crawl can discover
            # fresh outgoing links even when the seed itself was saved before.
            if url in self._downloaded_urls and counter > 1:
                self._display("已爬取", "*", 2)
                continue

            # Download the page; the downloader also returns the (possibly
            # redirected/normalized) final URL.
            page, url = self._downloader.download(url)

            # Parse the page.  A failed download yields a page object the
            # parser cannot handle, which surfaces as AttributeError.
            try:
                urls, data = self._parser.parse(page, url)
            except AttributeError as e:
                print(e)
                continue

            # Feed newly discovered URLs back into the manager.
            if urls:
                self._manager.add_urls(urls)

            # Persist the parsed record unless this URL was already saved.
            if data and url not in self._downloaded_urls:
                self._save(data)
            counter += 1
            if counter > target:
                break
            self._sleep()

    @staticmethod
    def _display(text: str, symbol: str = "=", num: int = 20) -> None:
        """Print ``text`` centred between two runs of ``num`` × ``symbol``."""
        line = symbol * num \
            + " " + text + " " \
            + symbol * num
        print(line)

    @staticmethod
    def _get_downloaded_urls() -> Set[str]:
        """
        Load the set of already-crawled URLs from FILE_PATH.

        Returns an empty set when the file does not exist yet (first run)
        instead of crashing with FileNotFoundError.  Reads as UTF-8 to match
        the non-ASCII output written by :meth:`_save`.
        """
        try:
            with open(Spider.FILE_PATH, "r", encoding="utf-8") as f:
                # Iterate the file lazily; skip blank lines.
                return {json.loads(line)["url"] for line in f if line.strip()}
        except FileNotFoundError:
            return set()

    @staticmethod
    def _sleep() -> None:
        """Sleep a random 1–6 seconds to mimic human click timing."""
        time.sleep(random.random() * 5 + 1)

    @staticmethod
    def _save(data: Dict[str, str]) -> None:
        """
        Append ``data`` as one JSON line to FILE_PATH.

        Written as UTF-8 with ``ensure_ascii=False`` so non-ASCII text (e.g.
        Chinese names) is stored verbatim.
        """
        with open(Spider.FILE_PATH, "a", encoding="utf-8") as f:
            line = json.dumps(data, ensure_ascii=False)
            f.write(line + "\n")


if __name__ == "__main__":
    # Crawl starting from a seed Baidu Baike page.
    seed_url = "https://baike.baidu.com/item/黄晓明/6597"
    Spider().execute(seed_url)
