import numpy as np
import database
from Crawl import *
from analyse_dep import get_dep_name, normalize_range, get_require_versions, get_require_versions_with_not_equal
import global_var

def count_id(num_files=57):
    """Count and print the total number of package ids across the batch files.

    Args:
        num_files: how many id files to read; files are named
            ./pkg_id/pkg_id_1.npy .. ./pkg_id/pkg_id_<num_files>.npy.
            Defaults to 57, the original hard-coded file count.
    """
    count = 0
    for i in range(1, num_files + 1):
        # allow_pickle is required because the arrays store Python objects.
        pkg_dep = np.load("./pkg_id/pkg_id_%s.npy" % i, allow_pickle=True).tolist()
        count += len(pkg_dep)
    print("total id num is: %s" % count)

def test_db():
    """Smoke-test the database layer.

    Looks up the 'numpy@1.9.1' package node, fetches its incoming
    'affect_pkg' relations, and prints the start node of the first one.
    """
    db = database.DataBase()
    target_node = db.match_pkg_by_id('numpy@1.9.1')
    relations = db.query_relations_by_node2(target_node, 'affect_pkg')
    print(relations[0].start_node)

def save_to_db():
    """Import the not-yet-persisted crawl batch files into the database.

    break_point.npy holds three indices; entry [1] is the last batch file
    already imported and entry [2] is the newest batch file on disk, so the
    range (break_point[1], break_point[2]] still needs importing.
    """
    db = database.DataBase()
    break_point = np.load("./break_point.npy").tolist()
    start = break_point[1] + 1
    end = break_point[2] + 1  # exclusive upper bound
    print("from file %s to file %s" % (start, end - 1))

    # Nodes first, then the relations that connect them.
    for batch_idx in range(start, end):
        db.add_pkgs(batch_idx)
    db.add_projs()
    # db.add_contributors()
    db.add_pkg_proj_relation(start, end)
    db.add_proj_contri_relation(start, end)
    db.add_pkg_pkg_relation(start, end)
    # db.add_vul_pkg_relation()
    # After a successful import, advance the break point:
    # np.save("./break_point.npy", [break_point[0], break_point[2], break_point[2]])

def crawl_abnormal_dep():
    """Re-crawl dependency info for packages whose earlier crawl was abnormal.

    break_point.npy stores three values: [0] the index of the last processed
    package name, [1] the index of the last file saved into the database,
    [2] the index of the newest batch file currently on disk.

    Bug fix: the batch-save checkpoint used to sit AFTER the while loop, so
    the "save every 50 packages" behavior promised by the original comment
    never happened mid-crawl (state was only flushed once, at the very end,
    risking total loss on a crash). The checkpoint now runs inside the loop.
    """
    break_point = np.load("./break_point.npy").tolist()
    file_count = break_point[2] + 1  # index for the next batch file to write
    print(file_count)

    # Restore crawler state from the previous run.
    global_var.pkg_list = np.load("./pkg_list.npy").tolist()
    global_var.ver_empty_list = np.load("./ver_empty_list.npy").tolist()
    global_var.abnormal_pkg_list = np.load("./abnormal_pkg_list.npy").tolist()
    global_var.error_pkg_list = np.load("./error_pkg_list.npy").tolist()
    global_var.proj_info = np.load("./proj_info.npy", allow_pickle=True).item()
    all_dict = global_var.load_ver_dict()
    print(len(all_dict))

    pkg_set = set(global_var.pkg_list)  # O(1) membership test for known packages
    for each in global_var.abnormal_pkg_list:
        global_var.pkg_queue.put(each)
    # global_var.abnormal_pkg_list.clear()

    count = 1
    while not global_var.pkg_queue.empty():
        print(count)
        cur_pkg = global_var.pkg_queue.get()

        if global_var.proj_info.get(cur_pkg) is None:
            global_var.proj_info[cur_pkg] = crawl_proj_info(cur_pkg)

        if all_dict.get(cur_pkg) is not None:
            ver_list = all_dict[cur_pkg]
        else:
            # crawl_pkg_version signals failure with -1; skip this package.
            if crawl_pkg_version(cur_pkg) == -1:
                count += 1
                continue
            ver_list = global_var.ver_dict[cur_pkg]
        print("start crawl %s %s version" % (cur_pkg, len(ver_list)))

        for ver in ver_list:
            pkg_id = cur_pkg + "@" + ver  # renamed from `id` (shadowed builtin)
            global_var.pkg_dep.append({'id': pkg_id, 'dependence': crawl_pkg_dep(pkg_id)})
            dependencies = global_var.pkg_dep[-1]['dependence']
            for dep in dependencies:  # vet each dependency before enqueueing it
                dep_name = get_dep_name(dep['name'])
                if dep_name is None or dep_name in pkg_set:
                    continue
                pkg_set.add(dep_name)
                global_var.pkg_list.append(dep_name)
                global_var.pkg_queue.put(dep_name)
            print("%s ok!" % pkg_id)

        print("queue length: %s" % len(global_var.pkg_queue.queue))

        if count % 50 == 0 or global_var.pkg_queue.empty():  # checkpoint every 50 packages
            np.save("./pkg_dep/pkg_dep_%s.npy" % file_count, global_var.pkg_dep)
            np.save("./pkg_id/pkg_id_%s.npy" % file_count, global_var.pkg_id)
            np.save("./pkg_ver/pkg_ver_%s.npy" % file_count, global_var.ver_dict)
            np.save("./pkg_list.npy", global_var.pkg_list)
            # np.save("./abnormal_pkg_list.npy", global_var.abnormal_pkg_list)
            np.save("./ver_empty_list.npy", global_var.ver_empty_list)
            np.save("./error_pkg_list.npy", global_var.error_pkg_list)
            # proj_info was saved twice in the original; once is enough.
            np.save("./proj_info.npy", global_var.proj_info)
            np.save("./break_point.npy", [len(global_var.pkg_list), break_point[1], file_count])
            global_var.clear_all()
            print("batch %s is ok!" % file_count)
            file_count += 1

        count += 1

    print("done!")

def check_require(file_idx=1, start=5000, end=6000):
    """Print the 'dependence' field of a slice of a pkg_dep batch file,
    for manual inspection.

    Args:
        file_idx: batch file index (./pkg_dep/pkg_dep_<file_idx>.npy).
        start: first entry index to print (inclusive).
        end: last entry index to print (exclusive).

    Defaults reproduce the original hard-coded behavior (file 1,
    entries 5000-5999).
    """
    pkg_dep = np.load("./pkg_dep/pkg_dep_%s.npy" % file_idx, allow_pickle=True).tolist()
    for i in range(start, end):
        print(pkg_dep[i]['dependence'])

def get_info():
    """Crawl project info for every known package name, persist the result
    to ./proj_info.npy, then reset the in-memory state.

    Fixes: manual counter replaced with enumerate; typo "sucessfully"
    corrected in the status message.
    """
    global_var.pkg_list = np.load("./pkg_list.npy").tolist()
    for count, each in enumerate(global_var.pkg_list, start=1):
        print(count)  # progress indicator
        global_var.proj_info[each] = crawl_proj_info(each)
    np.save("./proj_info.npy", global_var.proj_info)
    print("successfully get info!")
    global_var.pkg_list.clear()
    global_var.proj_info.clear()

def get_contributors():
    """Crawl contributor lists for every known package, flushing them to
    ./contributor/contributor_<n>.npy in batches of 500.

    Fixes: manual counter replaced with enumerate; loop-invariant
    len(pkg_list) hoisted; typo "sucessfully" corrected in the status
    message.
    """
    global_var.pkg_list = np.load("./pkg_list.npy").tolist()
    total = len(global_var.pkg_list)
    print(total)
    file_count = 1
    for count, each in enumerate(global_var.pkg_list, start=1):
        print(count)  # progress indicator
        global_var.contributor_dict[each] = crawl_contributors(each)

        # Flush every 500 packages, plus once more for the final partial batch.
        if count % 500 == 0 or count == total:
            np.save("./contributor/contributor_%s.npy" % file_count, global_var.contributor_dict)
            print("finish file %s" % file_count)
            file_count += 1
            global_var.contributor_dict.clear()

    print("successfully get contributors!")
    global_var.pkg_list.clear()

def merge_contributor(num_files=33):
    """Merge the per-batch contributor files into one deduplicated list and
    save it to ./contributor/contributor_list.npy.

    Contributors are deduplicated by 'github_id'. Dict values that are
    plain ints are skipped (the crawler stored error codes that way).

    Args:
        num_files: number of contributor batch files to merge; defaults to
            33, the original hard-coded file count.
    """
    seen_ids = set()
    merged = []
    for file_count in range(1, num_files + 1):
        contributor_dict = np.load("./contributor/contributor_%s.npy" % file_count, allow_pickle=True).item()
        for contri_list in contributor_dict.values():
            if isinstance(contri_list, int):
                continue  # crawl failure stored as an integer status code
            for contri in contri_list:
                gid = contri['github_id']
                if gid not in seen_ids:
                    seen_ids.add(gid)
                    merged.append(contri)

    print("collect %s contributors" % len(merged))
    np.save("./contributor/contributor_list.npy", merged)

def print_info():
    """Dump the saved project-info dictionary to stdout."""
    info = np.load("./proj_info.npy", allow_pickle=True).item()
    print(info)

def test_analyse_dep(id):
    """Resolve each dependency of package `id` to the newest version that
    satisfies its requirement string, record the resulting edges in
    global_var.pkg_dep_pkg, and print them.
    """
    all_dict = global_var.load_ver_dict()
    for dep in crawl_pkg_dep(id):
        dep_name = get_dep_name(dep['name'])
        ver_list = all_dict.get(dep_name)
        if ver_list is None:
            continue  # unknown package: no version list to match against
        range_str = normalize_range(dep['requirements'])
        if range_str is None:
            continue  # requirement could not be parsed into a range
        # "!=" exclusions need the dedicated matcher.
        if "!=" in range_str:
            ver_satisfy = list(get_require_versions_with_not_equal(range_str, ver_list))
        else:
            ver_satisfy = list(get_require_versions(range_str, ver_list))
        if ver_satisfy:
            # Pick the last (newest) satisfying version as the edge target.
            global_var.pkg_dep_pkg.append({'pkg_a': id, 'pkg_b': dep_name + "@" + ver_satisfy[-1]})

    for r in global_var.pkg_dep_pkg:
        print(r)


if __name__ == '__main__':
    # Ad-hoc driver: uncomment the routine you want to run.
    # count_id()
    # crawl_abnormal_dep()
    # check_require()
    # get_info()
    # get_contributors()
    # test_db()
    # merge_contributor()
    # print_info()
    test_analyse_dep('Scrapy@1.8.2')