File size: 1,701 Bytes
c69cba4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import json
import argparse
import requests
from typing import List


def get_repositories_names(token: str, min_stars: int) -> List[str]:
    """Return the deduplicated full names of huggingface org repositories
    with at least `min_stars` stargazers.

    Args:
        token: GitHub API token used for authenticated requests.
        min_stars: Minimum stargazer count a repository must have.

    Returns:
        List of 'owner/name' strings (order not guaranteed — results are
        deduplicated through a set).

    Raises:
        requests.HTTPError: If the GitHub API returns a non-success status.
    """
    repos_per_page = 100
    repo_names = []
    # GitHub REST pagination is 1-based; page=0 is treated as page 1,
    # so starting at 0 would fetch the first page twice.
    page = 1
    headers = {'Authorization': f'token {token}'}
    while True:
        url = \
            f'https://api.github.com/orgs/huggingface/repos?' \
            f'per_page={repos_per_page}&page={page}'
        response = requests.get(url, headers=headers)
        # Fail loudly instead of returning an error string: the declared
        # return type is List[str], and callers concatenate the result.
        response.raise_for_status()
        repos = response.json()
        repo_names += [
            repo['full_name'] for repo in repos
            if repo['stargazers_count'] >= min_stars
        ]
        # A short page means we've reached the last page of results.
        if len(repos) < repos_per_page:
            break
        page += 1
    return list(set(repo_names))


def save_repositories_urls(repositories_names: List[str], output_filename: str):
    """Write the given repositories as full GitHub URLs to a JSON file.

    The output file contains a single object of the form
    ``{"urls": ["https://github.com/<owner>/<name>", ...]}``.

    Args:
        repositories_names: Repository identifiers in 'owner/name' form.
        output_filename: Path of the JSON file to create/overwrite.
    """
    payload = {
        'urls': [f'https://github.com/{name}' for name in repositories_names]
    }
    with open(output_filename, 'w') as out_file:
        json.dump(payload, out_file, indent=4)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Scrape huggingface org repositories by star count '
                    'and save their GitHub URLs to a JSON file.'
    )
    parser.add_argument('--token', type=str, help='GitHub API token')
    # Let argparse validate and convert the value; previously the argument
    # was parsed as str and converted manually with int(args.stars), which
    # raised an unhelpful ValueError on bad input.
    parser.add_argument('--stars', type=int,
                        help='Minimum stargazer count a repository must have')
    args = parser.parse_args()
    repositories = get_repositories_names(token=args.token, min_stars=args.stars)
    # These repositories are always included, regardless of star count.
    repositories += [
        'huggingface/hf-endpoints-documentation',
        'gradio-app/gradio'
    ]
    print(f'Found {len(repositories)} repositories with at least {args.stars} stars')
    save_repositories_urls(repositories, 'datasets/hf_repositories_urls_scraped.json')