# -*- coding: utf-8 -*-
import json
import random
import time
import socket
import urllib.request as api_request
import urllib.error as api_error

from crawler.tools import get_random_user_agent
from crawler.tools import get_random_lib_key


class ContributorCrawler:
    """Crawl the full contributor list of a Maven package via the libraries.io API."""

    def __init__(self, pkg):
        # pkg: Maven package identifier used verbatim in the API URL path.
        self.pkg = pkg

    def crawl_contributor_list(self):
        """Fetch all contributors of ``self.pkg``, paging 100 per request.

        Returns:
            list: contributor records as decoded from the API's JSON pages.

        Transient failures (HTTP 429 rate-limit, socket errors) back off 10s
        and retry the same page. Permanent HTTP errors (e.g. 404 when the
        package has no contributors) abort the crawl and return whatever has
        been collected so far — retrying them forever would never terminate.
        """
        page = 1
        contributor_list = []
        while True:
            # Build the API request; a random API key and User-Agent help
            # spread requests across rate-limit buckets.
            api_key = get_random_lib_key()
            url = f"https://libraries.io/api/maven/{self.pkg}/contributors?api_key={api_key}&page={page}&per_page=100"
            print(url)
            header = {'User-Agent': get_random_user_agent()}
            r = api_request.Request(url, headers=header)
            try:
                # Context manager guarantees the HTTP response is closed
                # (the original leaked the connection on every request).
                with api_request.urlopen(r, timeout=30) as response:
                    contributor_list_100 = json.load(response)
            except api_error.HTTPError as e:
                if e.code == 429:  # rate-limited: back off, retry same page
                    print(f"错误429: 爬取过快，尝试重新爬取项目贡献者 {self.pkg}")
                    time.sleep(10)
                    continue
                # Permanent error (e.g. 404): the original retried forever
                # here, looping endlessly — stop and return what we have.
                print(f"错误{e.code}: 无法通过API请求获取贡献者 {self.pkg}")
                break
            except socket.error:  # transient network problem: retry same page
                print(f"出错啦! 尝试重新爬取贡献者 {self.pkg}")
                time.sleep(10)
                continue
            # An empty page means there are no more contributors.
            if not contributor_list_100:
                break
            # Small randomized delay between pages to stay under rate limits.
            time.sleep((1 + random.random()) * 0.25)
            page += 1
            contributor_list.extend(contributor_list_100)

        print(f"{self.pkg} 贡献者数目: {len(contributor_list)}")
        return contributor_list
