#!/usr/bin/env python
# coding=utf-8

import requests
from lxml import html as H
from creepy import Crawler
from urlparse import urljoin,urlparse
import json
import logging
from multiprocessing.dummy import Pool as thread_pool
import os
from datetime import datetime
from collections import defaultdict
from log_helper import logger
from urlparse import urljoin


#region Debian package page parsing helpers
def getPackagePageUrl(pkg_name):
    """Return the packages.debian.org (stretch) page URL for *pkg_name*."""
    template = "https://packages.debian.org/stretch/{}"
    return template.format(pkg_name)

def getHomePage(pkg_name):
    """Fetch the Debian package page and return its upstream homepage link.

    Looks for the first link after the "External Resources:" heading in
    the div#pmoreinfo sidebar of the packages.debian.org page.

    @params
        pkg_name: Debian package name
    @retval
        homepage URL string, or None when the page is missing, the
        sidebar is absent, or no link follows the heading
    """
    pkgPageUrl = getPackagePageUrl(pkg_name)
    if not pkgPageUrl:
        return None

    response = requests.get(pkgPageUrl)
    if response.status_code != 200:
        return None
    dom = H.fromstring(response.text)
    # Virtual packages have no normal page, so the selector may match nothing.
    uls = dom.cssselect('div#pmoreinfo')
    if not uls:
        return None
    children = uls[0].getchildren()
    for k, ele in enumerate(children):
        if ele.text == "External Resources:":
            # Guard: heading may be the last child, or the following list
            # may contain no links (the original indexed blindly and could
            # raise IndexError here).
            if k + 1 >= len(children):
                return None
            links = children[k + 1].cssselect("li a")
            if not links:
                return None
            return links[0].attrib.get("href")
    return None

def crawlGitUrl(start_url):
    """Crawl every same-host page reachable from start_url and collect links.

    If start_url is already on a github host, no crawl is performed and
    [start_url] is returned directly. Otherwise a creepy Crawler walks all
    pages on the same host and every <a href> found (in-page anchors
    excluded, relative links resolved against the site root) is appended
    to the shared `urls` list.

    @params
        start_url: absolute URL to start crawling from
    @retval
        list of absolute URLs (may contain duplicates), always including
        start_url itself
    """
    urls = [start_url]
    parsed_url = urlparse(start_url)
    scheme = parsed_url.scheme
    tld = parsed_url.netloc
    if "github" in tld:
        return urls
    base_url = "{}://{}".format(scheme, tld)

    # TODO: crawling the "3dldf" package produces a flood of parse errors.
    # Because the crawler uses threading internally, those errors are
    # printed directly instead of going through the logger.
    class spider(Crawler):
        def process_document(self,doc):
            if doc.status==200:
                # print '[%d] %s' % (doc.status, doc.url)
                dom = H.fromstring(doc.text)
                links = dom.cssselect("a")
                extracted_links = []
                for link in links:
                    if 'href' not in link.attrib:
                        continue
                    href = link.attrib['href']
                    # Skip in-page anchors.
                    if href.startswith("#"):
                        continue
                    # Resolve relative links against the site root.
                    if not href.startswith("http"):
                        href = urljoin(base_url, href)
                    extracted_links.append(href)

                urls.extend(extracted_links)
    # Use the crawler to fetch every same-host page starting from the
    # homepage and extract the hyperlink from each <a> element.
    # Note: some extracted hrefs may be anchors or indirect redirects and
    # need the special handling above.
    crawler = spider()
    crawler.set_follow_mode(Crawler.F_SAME_HOST)
    crawler.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf)$')
    crawler.crawl(start_url)
    return urls


def getRelatedUrlByPkg(pkg, dumpFile = False):
    """Find a package's homepage and crawl it for related URLs.

    @params
        pkg: Debian package name
        dumpFile: when True, dump the collected URL list as JSON to
            ../output/debian/<pkg>
    @retval
        list of URLs, or None when no homepage / no URLs were found or an
        exception occurred (exceptions are logged, not propagated)
    """
    try:
        homepage = getHomePage(pkg)
        if not homepage:
            return None
        logger.info("Package homepage: {}".format(homepage))
        related_url = crawlGitUrl(homepage)
        if not related_url:
            return None
        if dumpFile:
            with open("../output/debian/{}".format(pkg), "w") as f:
                json.dump(related_url, f)
        return related_url
    except Exception as e:  # "as" syntax works on Py2.6+ and Py3 (was "except Exception,e")
        logger.error(e, exc_info=True)
#endregion

def searchCve():
    '''
    For every Debian package name, log CVE entries from the NVD feed whose
    serialized JSON mentions the package (crude substring match).

    Reads ../input/allpackages.txt (one package per line, first column) and
    ../input/nvdcve-1.0-modified.json. Exceptions are logged, not raised.
    '''
    try:
        with open("../input/allpackages.txt") as f1, open("../input/nvdcve-1.0-modified.json") as f2 :
            # "if l.split()" skips blank lines; the original "l.split()[0]"
            # guard raised IndexError on an empty line.
            pkgnames = [l.split()[0] for l in f1 if l.split()]
            nvdcve = json.load(f2)["CVE_Items"]
            # Serialize each CVE once, not once per package: hoists the
            # loop-invariant json.dumps out of the O(packages * cves) loop.
            serialized = [(json.dumps(cve), cve) for cve in nvdcve]
            for pkg in pkgnames:
                for cve_str, cve in serialized:
                    if pkg in cve_str:
                        logger.info("{} {}".format(pkg, cve["cve"]["CVE_data_meta"]["ID"]))
    except Exception as e:  # "as" syntax works on Py2.6+ and Py3 (was "except Exception,e")
        logger.error(e, exc_info=True)

        
def run():
    '''
    Process all packages: fetch each homepage, then crawl every page on the
    same host starting from it and collect all URLs found (20 worker threads).
    '''
    with open("../input/allpackages.txt") as f:
        # "if l.split()" skips blank lines; the original "l.split()[0]"
        # guard raised IndexError on an empty line.
        pkgnames = [l.split()[0] for l in f if l.split()]
    pool = thread_pool(20)
    pool.map(getRelatedUrlByPkg, pkgnames)
    # BUG FIX: join() without a preceding close() raises
    # ValueError("Pool is still running").
    pool.close()
    pool.join()
            
def CountCrawledUrl():
    '''
    Report how many packages and URLs were collected, and how many of the
    URLs are git URLs; also log the ten most common git-hosting domains.

    Reads the per-package JSON url lists from ../output/debian.
    '''
    collected_packages = os.listdir("../output/debian")
    # The crawled URLs contain many duplicates; the total is well under a
    # few million, so de-duplicate directly in memory with a set.
    all_urls = set()
    for pkg in collected_packages:
        with open("../output/debian/{}".format(pkg)) as f:
            all_urls.update(json.load(f))
    git_urls = [x for x in all_urls if "git" in x]
    tld_map = defaultdict(int)
    for url in git_urls:
        tld_map[urlparse(url).netloc] += 1

    logger.info("Collected {} urls from {} packages, including {} git urls".format(len(all_urls), len(collected_packages), len(git_urls)))
    # Rank domains by count, descending.
    ranked = sorted([(value, key) for key, value in tld_map.items()], reverse=True)
    for value, key in ranked[:10]:
        logger.info("{} : {}".format(key, value))


def getSourceCodeDirectoryInfo(srcPkg):
    '''
    @params
        srcPkg: a Debian source package name
    @retval
        list of top-level entry names in the package's stretch source tree,
        or None when the package page / stretch version / listing is absent
    '''
    resp = requests.get("https://sources.debian.net/src/{}".format(srcPkg))
    if resp.status_code != 200:
        return None

    version_page = H.fromstring(resp.text)
    ul_nodes = version_page.cssselect("ul#ls")
    if not ul_nodes:
        return None
    # Find the list entry for the "stretch" release and follow its link.
    for li in ul_nodes[0]:
        if "stretch" in li.text_content():
            anchors = li.cssselect("a")
            if not anchors:  # guard: entry without a link (original raised IndexError)
                return None
            link = anchors[0].attrib["href"]
            break
    else:
        return None
    url = urljoin("https://sources.debian.net", link)

    resp = requests.get(url)
    if resp.status_code != 200:
        return None
    dom = H.fromstring(resp.text)
    tables = dom.cssselect("table.dir-listing")
    if not tables:  # guard: listing table absent (original raised IndexError)
        return None
    directories = []
    for tr in tables[0].getchildren():
        # if tr.cssselect("td.stat-type")[0].text_content().strip()!="d":
        #     continue
        names = tr.cssselect("td.item-name")
        if not names:  # skip header / malformed rows instead of crashing
            continue
        directories.append(names[0].text_content().strip())

    return directories



# def getVcsByDebianPackageTracker(pkgname):
#         resp = requests.get("https://tracker.debian.org/pkg/{}".format(pkgname))
#         if resp.status_code == 200:
#             dom = H.fromstring(resp.text)
#             ele = dom.cssselect(".list-group.list-group-flush")[0].cssselect(".list-group-item")[6]
#             text = ele.text_content()
#             link = ele.cssselect("a")[1].attrib["href"]
#             return pkgname, link
#         return None

if __name__=="__main__":
    from sys import argv
    # getSourceCodeDirectoryInfo returns None on failure; the original
    # iterated it unconditionally and crashed with TypeError.
    entries = getSourceCodeDirectoryInfo(argv[1])
    if entries is None:
        logger.error("Could not fetch source listing for {}".format(argv[1]))
    else:
        for entry in entries:
            # Parenthesized form works on both Py2 (grouping) and Py3 (call).
            print(entry)