import requests
import urllib.request as req
from urllib import error
import json
import os
import pandas as pd
import math
import time
from random import randint
import socket
import global_var
import numpy as np

# Pool of User-Agent strings; each request picks one at random so the
# crawler looks less like a single automated client.
USER_AGENTS = [
 "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
 "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
 "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
 "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
 "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
 "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
 "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
 "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
 "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
 "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
 "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
 "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
 "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
 "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
 "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
 "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52"
]

# Pool of libraries.io API keys; each request picks one at random to
# spread load across per-key rate limits.
# SECURITY(review): hard-coding API keys in source is risky — consider
# loading them from the environment, and rotate any keys already committed.
API_KEY = [
    "1e5deaaa74515816880b36bf28c5a569",
    "fd58a5df21d2e87f9c99e10adfba1f1f",
    "302bafaa6ecda254166e0bec2d79ba12",
    "d9767a0692bdfc4db28507b950196d4c",
    "24f54c0d8f551f17bb68d92141c7de32",
    "50c21d685deb200bbc04b526698f2664",
    "06640f9d5e4e22df7008592cfe25e276",
    "8a0c01e477e76a9ca1133464555002f0",
    "dad0eac0ba4fd47ec6635c50e42f3b50",  # duplicate entry removed (was listed twice)
    "289ceecb87390346d78acfd9d83d49d1",
    "4f3b149c0a5353fa2c8d8359092c58ec",
    "55f248d69cf3846aac2ac6c869f85fb4",
    "ec4f906c91dcc86b4dae568465dd0259"
]
# Results requested per page from the libraries.io search API; the paging
# math in crawl_pkg relies on this matching the per_page URL parameter.
per_page = 100


# With the proxy-pool service running, fetch a random proxy via port 5010.
def get_proxy():
    """Return the JSON payload from the local proxy pool's /get/ endpoint."""
    resp = requests.get("http://127.0.0.1:5010/get/")
    return resp.json()

# Ask the local proxy-pool service to drop a dead proxy.
def delete_proxy(proxy):
    """Remove `proxy` from the pool; the response body is ignored."""
    url = "http://127.0.0.1:5010/delete/?proxy={}".format(proxy)
    requests.get(url)

# Fetch one page of package names from the libraries.io search API.
def crawl_pkg_list(page, sort="dependents_count"):
    """Return the list of package names on result page `page`.

    page: 1-based page number.
    sort: API sort field (default "dependents_count").
    """
    api_key = API_KEY[randint(0, len(API_KEY) - 1)]
    # Use the module-level per_page constant instead of a hard-coded 100 so
    # it cannot drift from the paging math in crawl_pkg.
    url = ("https://libraries.io/api/search?order=desc&page=%s&platforms=PyPI"
           "&sort=%s&per_page=%s&api_key=%s" % (page, sort, per_page, api_key))
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = {'User-Agent': random_agent}
    response = requests.get(url, headers=headers)
    pkg_list = [each['name'] for each in response.json()]
    print("successfully get page %d!\n" % page)
    return pkg_list

# Work out how many pages the requested total needs, then crawl package
# names page by page, de-duplicating against any previously saved list.
def crawl_pkg(total):
    """Crawl `total` package names (rounded up to whole pages) and persist
    the cumulative, de-duplicated list to pkg_list.npy."""
    if os.path.exists("pkg_list.npy"):
        pkg_list = np.load("./pkg_list.npy").tolist()
    else:
        pkg_list = []
    seen = set(pkg_list)  # O(1) membership checks while appending
    pages = math.ceil(total / per_page)
    print("start to get %s pages\n" % pages)
    for page in range(1, pages + 1):
        for name in crawl_pkg_list(page):
            if name not in seen:
                seen.add(name)
                pkg_list.append(name)
    print("successfully get %s packages!\n" % len(pkg_list))
    np.save("./pkg_list.npy", pkg_list)

# Fetch one page of package names and repository URLs, ordered by stars.
def crawl_pkg_repo(page, sort="stars"):
    """Return [{'name': ..., 'url': ...}, ...] for one search result page."""
    api_key = API_KEY[randint(0, len(API_KEY) - 1)]
    url = "https://libraries.io/api/search?order=desc&page=%s&platforms=PyPI&sort=%s&per_page=100&api_key=%s" % (page, sort, api_key)
    headers = {'User-Agent': USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]}
    results = requests.get(url, headers=headers).json()
    repo_list = [{'name': item['name'], 'url': item['repository_url']}
                 for item in results]
    print("successfully get page %d:%d!" % (page, len(results)))
    return repo_list

# Fetch a package's version info, saved into a dict and an id list.
def crawl_pkg_version(name):
    """Fetch all version numbers of package `name` from libraries.io.

    Side effects: fills global_var.ver_dict[name] with version numbers and
    appends "name@version" ids to global_var.pkg_id; failures go to
    global_var.error_pkg_list / ver_empty_list and are logged to log.txt.
    Retries (via recursion) on HTTP 429/502 and socket errors.
    Returns 0 on success, -1 on failure or an empty version list.
    """
    name = name.strip()
    api_key = API_KEY[randint(0, len(API_KEY) - 1)]
    url_version = "https://libraries.io/api/PYPI/%s?api_key=%s" % (name, api_key)
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = {'User-Agent': random_agent}
    r = req.Request(url_version, headers=headers)
    try:
        response = req.urlopen(r)
    except error.HTTPError as e:
        if e.code == 429 or e.code == 502:  # rate-limited / bad gateway: wait and retry
            print("wait 30s for error %s while crawling pkg:%s version" % (e.code, name))
            time.sleep(30)
            return crawl_pkg_version(name)
        else:  # any other HTTP error: log and give up on this package
            print(e.code)
            print("HTTP error no pkg: %s" % name)
            # context manager guarantees the log handle is closed
            with open("log.txt", "a") as log:
                log.write("fail to get pkg %s version\n" % name)
            global_var.error_pkg_list.append(name)
            return -1
    except socket.error:
        time.sleep(10)
        print("sleep now")
        return crawl_pkg_version(name)
    response_dict = json.load(response)
    versions = response_dict['versions']
    global_var.ver_dict[name] = []
    if not versions:  # package exists but reports no versions
        global_var.ver_empty_list.append(name)
        with open("./log.txt", "a") as log:
            log.write("empty pkg %s version\n" % name)
        return -1
    for version in versions:
        version_num = version['number']  # the concrete version string
        global_var.ver_dict[name].append(version_num)
        global_var.pkg_id.append(name + "@" + version_num)
    print("success to get pkg %s  %s versions" % (name, len(versions)))
    return 0

# Fetch the dependency list for a package id of the form "name@version".
def crawl_pkg_dep(id):
    """Return the 'dependencies' list from libraries.io for `id`.

    id: "name@version" string; [] is returned for malformed ids or
    non-retryable HTTP errors. Retries on HTTP 429/502 and socket errors.
    """
    # NOTE(review): parameter shadows the builtin `id`; name kept so
    # existing keyword callers are not broken.
    id = id.strip()
    api_key = API_KEY[randint(0, len(API_KEY) - 1)]
    if '@' not in id:
        return []
    parts = id.split('@')
    name, version = parts[0], parts[1]
    url_dependence = "https://libraries.io/api/PYPI/%s/%s/dependencies?api_key=%s" % (name, version, api_key)
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = {'User-Agent': random_agent}
    r = req.Request(url_dependence, headers=headers)
    try:
        response = req.urlopen(r)
    except error.HTTPError as e:
        if e.code == 429 or e.code == 502:  # rate-limited / bad gateway: wait and retry
            print("wait 30s for error %s while crawling pkg:%s" % (e.code, id))
            time.sleep(30)
            return crawl_pkg_dep(id)
        else:  # any other HTTP error: log and give up on this id
            print(e.code)
            print("HTTP error no pkg: %s" % id)
            # context manager guarantees the log handle is closed
            with open("log.txt", "a") as log:
                log.write("fail to get pkg %s dependence %s\n" % (id, e.code))
            return []
    except socket.error:
        print("sleep now")
        time.sleep(10)
        return crawl_pkg_dep(id)
    response_dict = json.load(response)
    return response_dict['dependencies']

# Fetch project information by project name.
def crawl_proj_info(name):
    """Fetch summary info for package `name` from libraries.io.

    Returns a dict with keys description / homepage / repository_license /
    repository_url / stars, or {} on a non-retryable HTTP error.
    Retries on HTTP 429/502 and socket errors.
    """
    name = name.strip()
    api_key = API_KEY[randint(0, len(API_KEY) - 1)]
    url_version = "https://libraries.io/api/PYPI/%s?api_key=%s" % (name, api_key)
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = {'User-Agent': random_agent}
    r = req.Request(url_version, headers=headers)
    try:
        response = req.urlopen(r)
    except error.HTTPError as e:
        if e.code == 429 or e.code == 502:  # rate-limited / bad gateway: wait and retry
            print("wait 30s for error %s while crawling pkg:%s info" % (e.code, name))
            time.sleep(30)
            return crawl_proj_info(name)
        else:  # any other HTTP error: log and give up on this package
            print(e.code)
            print("HTTP error no pkg: %s" % name)
            # context manager guarantees the log handle is closed
            with open("log.txt", "a") as log:
                log.write("fail to get pkg %s info\n" % name)
            return {}
    except socket.error:
        time.sleep(10)
        print("sleep now")
        return crawl_proj_info(name)
    response_dict = json.load(response)
    print("success to get pkg %s info" % name)
    # keep only the summary fields callers use
    keys = ['description', 'homepage', 'repository_license', 'repository_url', 'stars']
    return {key: response_dict[key] for key in keys}

# Fetch contributors by project name.
def crawl_contributors(name):
    """Fetch the contributor list for package `name` from libraries.io.

    Returns the parsed JSON list, or [] on a non-retryable HTTP error.
    Retries on HTTP 429/502 and socket errors.
    """
    name = name.strip()
    api_key = API_KEY[randint(0, len(API_KEY) - 1)]
    url_version = "https://libraries.io/api/PYPI/%s/contributors?api_key=%s" % (name, api_key)
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = {'User-Agent': random_agent}
    r = req.Request(url_version, headers=headers)
    try:
        response = req.urlopen(r)
    except error.HTTPError as e:
        if e.code == 429 or e.code == 502:  # rate-limited / bad gateway: wait and retry
            print("wait 30s for error %s while crawling pkg:%s contributors" % (e.code, name))
            time.sleep(30)
            return crawl_contributors(name)
        else:  # any other HTTP error: log and give up on this package
            print(e.code)
            print("HTTP error no pkg: %s" % name)
            # context manager guarantees the log handle is closed
            with open("log.txt", "a") as log:
                log.write("fail to get pkg %s contributors\n" % name)
            return []
    except socket.error:
        time.sleep(10)
        print("sleep now")
        return crawl_contributors(name)
    contributor_list = json.load(response)
    print("success to get pkg %s contributors" % name)
    return contributor_list

# Fetch the vulnerability list for a package (optionally pinned to a version)
# from the PyPI JSON API, routed through a random proxy from the pool.
def crawl_vulnerability(name, ver):
    """Return the 'vulnerabilities' list for `name` (version `ver`, or the
    latest when ver == "") from pypi.org; [] on a non-retryable HTTP error.

    Retries on HTTP 429/502, socket errors, and bad/incomplete JSON.
    """
    url = "https://pypi.org/pypi/%s/json" % name if ver == "" else "https://pypi.org/pypi/%s/%s/json" % (name, ver)
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = [('User-Agent', random_agent)]
    proxy = get_proxy().get('proxy')
    proxy_handler = req.ProxyHandler({"http": "http://{}".format(proxy)})
    opener = req.build_opener(proxy_handler)
    opener.addheaders = headers
    # NOTE(review): install_opener sets a process-global opener, so the proxy
    # leaks into other urlopen calls until replaced.
    req.install_opener(opener)
    try:
        response = req.urlopen(url)
    except error.HTTPError as e:
        if e.code == 429 or e.code == 502:  # rate-limited / bad gateway: wait and retry
            print("wait 30s for error %s while crawling pkg:%s vulnerability" % (e.code, name + "@" + ver))
            time.sleep(30)
            return crawl_vulnerability(name, ver)
        else:  # any other HTTP error: log and give up on this package
            print(e.code)
            print("HTTP error no pkg: %s:%s" % (name, ver))
            with open("log.txt", "a") as log:  # context manager closes the handle
                log.write("fail to get pkg %s:%s vulnerability\n" % (name, ver))
            return []
    except socket.error:
        time.sleep(10)
        print("sleep now")
        return crawl_vulnerability(name, ver)
    try:
        package = json.load(response)
        vulnerabilities = package['vulnerabilities']
    except (ValueError, KeyError):
        # was a bare `except:` — only retry on bad JSON / missing key, not on
        # KeyboardInterrupt or unrelated failures
        print("repeat %s" % (name + "@" + ver))
        return crawl_vulnerability(name, ver)
    delete_proxy(proxy)  # release the proxy only after a successful parse
    return vulnerabilities

# Call a GitHub API endpoint with a token and return the parsed JSON.
def crawl_github_api(url):
    """GET `url` from the GitHub API and return the parsed JSON dict;
    {} on a non-retryable HTTP error.

    Retries on HTTP 403/429/502 (rate limiting) and socket errors.
    """
    # SECURITY(review): hard-coded personal access token committed to source.
    # Move it to an environment variable and revoke this token.
    token = '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'
    url = url.strip()
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = {'User-Agent': random_agent, 'Authorization': 'token %s' %  token}
    r = req.Request(url, headers=headers)
    try:
        response = req.urlopen(r)
    except error.HTTPError as e:
        if e.code == 403 or e.code == 429 or e.code == 502:  # rate-limited: wait and retry
            print("wait 30s for error %s while crawling %s" % (e.code, url))
            time.sleep(30)
            return crawl_github_api(url)
        else:  # any other HTTP error: log and give up on this url
            print(e.code)
            print("HTTP error %s" % url)
            # context manager guarantees the log handle is closed
            with open("stars/log.txt", "a") as log:
                log.write("fail to get %s info\n" % url)
            return {}
    except socket.error:
        print("sleep now")
        time.sleep(10)
        return crawl_github_api(url)
    response_dict = json.load(response)
    print("success to get %s information" % (url))
    return response_dict