from Crawl import crawl_pkg_repo, crawl_github_api
import numpy as np
import os
import re
import time
from random import uniform
import database

def crawl_info():
    """Crawl repository details, issues, and up to 1000 commits for every
    GitHub URL in the module-level ``repo_only_list``.

    Side effects: every 50 repos, checkpoints the accumulated commits and
    issues to ./stars/repo_commits_<k>.npy / ./stars/repo_issues_<k>.npy
    and clears the in-memory buffers; at the end saves all repo details
    to ./stars/repo_detail.npy. Sleeps a random interval between requests,
    presumably to stay under GitHub's rate limit.
    """
    repo_detail = {}
    commits = {}
    issues = {}
    count = 1
    # Hoisted out of the loop: the same pattern is used for every URL.
    # group(1) = scheme/prefix, group(2) = "github.com/", group(3) = owner/repo path.
    pattern = re.compile(r'(.*)(github.com/)(.*)')
    for url in repo_only_list:
        # Resume support: skip the first 900 repos (crawled in an earlier run).
        if count <= 900:
            count += 1
            continue
        print(count)
        match = pattern.search(url)
        if match is None:
            print("%s is not github url" % url)
            continue
        # Rewrite "<prefix>github.com/<path>" into the API form
        # "<prefix>api.github.com/repos/<path>". Using the captured groups
        # replaces the original opaque match.regs offset slicing.
        api_repo_url = match.group(1) + "api." + match.group(2) + "repos/" + match.group(3)
        api_commits_url = api_repo_url + '/commits'
        api_issues_url = api_repo_url + '/issues'
        print(api_repo_url)
        repo_detail[url] = crawl_github_api(api_repo_url)
        time.sleep(uniform(0.4, 0.6))

        # Crawl issues page by page until the API returns an empty list.
        page = 1
        issues[url] = []
        while True:
            res = crawl_github_api(api_issues_url + "?page=%s&per_page=100" % page)
            if len(res) == 0:
                break
            issues[url].extend(res)
            time.sleep(uniform(0.2, 0.4))
            page += 1

        print()

        # Crawl at most 1000 commit records (10 pages x 100 per page).
        commits[url] = []
        for page in range(1, 11):
            res = crawl_github_api(api_commits_url + "?page=%s&per_page=100" % page)
            if len(res) == 0:
                break
            commits[url].extend(res)
            time.sleep(uniform(0.2, 0.4))

        # Checkpoint every 50 repos so a crash doesn't lose everything.
        if count % 50 == 0:
            file_count = count // 50
            np.save("./stars/repo_commits_%s.npy" % file_count, commits)
            np.save("./stars/repo_issues_%s.npy" % file_count, issues)
            commits.clear()
            issues.clear()
        count += 1

    # Flush whatever accumulated after the last full 50-repo checkpoint.
    if len(commits) != 0:
        file_count = count // 50 + 1
        np.save("./stars/repo_commits_%s.npy" % file_count, commits)
        np.save("./stars/repo_issues_%s.npy" % file_count, issues)
    print("totally has %s repo_details" % len(repo_detail))
    np.save("./stars/repo_detail.npy", repo_detail)

def save_to_db(num_files=25):
    """Load crawled repo details, commits, and issues from the local .npy
    checkpoint files and insert them into the database.

    Args:
        num_files: number of repo_commits_<k>.npy / repo_issues_<k>.npy
            checkpoint pairs to load (k runs from 1 to num_files).
            Defaults to 25, matching the original hard-coded range(1, 26).
    """
    db = database.DataBase()
    # .item() unwraps the 0-d object array back into the original dict.
    repo_detail = np.load("./stars/repo_detail.npy", allow_pickle=True).item()
    db.add_repository(repo_detail)
    for file_count in range(1, num_files + 1):
        print(file_count)
        commits = np.load("./stars/repo_commits_%s.npy" % file_count, allow_pickle=True).item()
        issues = np.load("./stars/repo_issues_%s.npy" % file_count, allow_pickle=True).item()
        db.add_commits(commits)
        db.add_issues(issues)

if __name__ == '__main__':
    # Crawl the top-10000-starred package names and their repositories.
    repo_list = []       # (package, repository) records from the package index
    repo_only_list = []  # de-duplicated repository URLs, first-seen order
    if not os.path.exists("stars/repo_list.npy"):
        pages = 100  # the API returns at most 100 entries per page
        for page in range(pages):
            # Start from page 1: page 0 and page 1 return identical content.
            repo_list.extend(crawl_pkg_repo(page + 1))
        np.save("stars/repo_list.npy", repo_list)
        # Keep each repository URL once (several packages can share a repo).
        repo_set = set()
        for each in repo_list:
            url = each['url']
            if url is not None and url not in repo_set:
                repo_only_list.append(url)
                repo_set.add(url)
        np.save("stars/repo_only_list.npy", repo_only_list)
    else:
        repo_list = np.load("stars/repo_list.npy", allow_pickle=True).tolist()
        repo_only_list = np.load("stars/repo_only_list.npy", allow_pickle=True).tolist()
    print("totally has %s pkgs" % len(repo_list))
    print("totally has %s repos" % len(repo_only_list))

    # Crawl each repository's detail record, commit list and issue list.
    # BUG FIX: crawl_info() was previously called unconditionally, re-crawling
    # (and overwriting) everything even when repo_detail.npy already existed;
    # it now runs only when the detail file is missing.
    if not os.path.exists("stars/repo_detail.npy"):
        print("start crawl the information which we need")
        crawl_info()

    # Extract the crawled data from the local files and store it in the DB.
    print("start save nodes and relationships to db")
    save_to_db()