import random
import time
import requests
from bs4 import BeautifulSoup

from crawler.tools import get_random_user_agent


class TopPackageCrawler:
    """Scrapes libraries.io search results for the top-ranked Maven packages.

    Each result page lists 30 packages; crawling 40 pages yields the top
    1200 package names, hence the method name.
    """

    # Per-request network timeout (seconds); prevents a stalled connection
    # from hanging the whole crawl.
    REQUEST_TIMEOUT = 30

    def __init__(self):
        self.url = "https://libraries.io/search"

    def get_top1200list(self, pages=40):
        """Crawl the first *pages* result pages and return the package names.

        Args:
            pages: Number of result pages to fetch (default 40, i.e. the
                top 1200 packages at 30 results per page).

        Returns:
            list[str]: Package names in rank order, whitespace-stripped.

        Raises:
            requests.HTTPError: If a page responds with a 4xx/5xx status.
            requests.Timeout: If a request exceeds REQUEST_TIMEOUT seconds.
        """
        print("开始爬取数据")
        maven_package_list = []
        for page in range(1, pages + 1):
            # Build the search request for this page, sorted by rank.
            params = {'order': 'desc', 'page': str(page), 'platforms': 'Maven', 'sort': 'rank'}
            header = {'User-Agent': get_random_user_agent()}
            response = requests.get(self.url, params=params, headers=header,
                                    timeout=self.REQUEST_TIMEOUT)
            # Fail loudly on HTTP errors instead of silently parsing an
            # error page and collecting nothing.
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # Extract the package-name anchors from the result listing.
            package_list = soup.select('body > div.container > div.row > div.col-sm-8 > div > h5 > a')

            # Collect the names, trimming surrounding whitespace.
            for package in package_list:
                maven_package_list.append(package.get_text().strip())

            # Progress indicator (overwrites the same console line).
            print("\r", f"爬取进度：{round(page/pages*100, 1)}%", end="", flush=True)

            # Randomized delay (3-6 s) to avoid anti-scraping measures.
            time.sleep(3*(1+random.random()))

        print("\n数据爬取完毕")
        return maven_package_list
