# -*- coding: utf-8 -*-
import json
import socket
import urllib.request as api_request
import urllib.error as api_error
import time

from crawler.tools import get_random_user_agent, get_random_lib_key


class TopProjectCrawler:

    @staticmethod
    def get_top1000list():
        """Return repository URLs of the top-1000 Maven projects by stars.

        Pages through the libraries.io search API (100 results per page,
        100 pages, i.e. up to 10000 packages), then deduplicates the
        repository URLs and keeps the first 1000 in ranking order.

        Returns:
            list[str]: up to 1000 unique repository URLs, ordered by
            descending star count.
        """
        # Fetch the top-10000 packages, 100 per page.
        pkg_10000_list = []
        for page in range(1, 101):
            # Build the API request with a rotated key and user agent.
            api_key = get_random_lib_key()
            url = (
                "https://libraries.io/api/search?platforms=Maven&sort=stars"
                f"&order=desc&page={page}&per_page=100&api_key={api_key}"
            )
            print(url)

            header = {'User-Agent': get_random_user_agent()}
            request = api_request.Request(url, headers=header)

            # Retry transient network/API failures so a single bad page
            # does not abort the whole 100-page crawl.
            for attempt in range(3):
                try:
                    # Context manager closes the HTTP response, avoiding
                    # a leaked socket on every one of the 100 requests.
                    with api_request.urlopen(request, timeout=30) as response:
                        pkg_10000_list.extend(json.load(response))
                    break
                except (api_error.URLError, socket.timeout) as exc:
                    print(f"page {page} attempt {attempt + 1} failed: {exc}")
                    time.sleep(2)  # back off briefly before retrying

        print(len(pkg_10000_list))

        # Deduplicate repository URLs, keeping only the top 1000.
        project_repo_list = []
        seen = set()  # O(1) membership test instead of scanning the list
        for pkg in pkg_10000_list:
            repo_url = pkg["repository_url"]
            if repo_url not in seen:
                seen.add(repo_url)
                project_repo_list.append(repo_url)
            if len(project_repo_list) == 1000:
                break

        return project_repo_list

