#!/usr/bin/env python
#coding=utf-8
from crawler import Crawler
import requests
from lxml import html as H
#from creepy import Crawler
from urlparse import urljoin,urlparse
import json
import logging
import multiprocessing
from multiprocessing import Queue
import os
from datetime import datetime
from collections import defaultdict
from log_helper import logger
import fcntl
import re
import signal




class DebianPackageCrawler:
    """Crawl the upstream homepages of Debian (stretch) source packages and
    collect every same-host hyperlink reachable from them.

    For each package the result is written to ``output/DebianCrawler/<pkg>``,
    one URL per line.  Work is fanned out to ``self.thread_count`` worker
    processes consuming package names from a shared multiprocessing queue.
    """

    # Sentinel pushed onto the queue once per worker so workers can exit
    # cleanly instead of blocking forever on an empty queue.
    _SENTINEL = None

    def __init__(self):
        # NOTE(review): each worker process gets its own copy of this set,
        # so deduplication is per-process only — confirm that is intended.
        self.crawled = set()
        self.thread_count = 10

    def getHomePage(self, pkg_name):
        """Return the upstream homepage URL for *pkg_name*, or None.

        Scrapes the package's page on packages.debian.org and pulls the
        first link out of the "External Resources:" section.  Virtual
        packages have no regular page, hence the None fall-throughs.
        """
        pkgPageUrl = "https://packages.debian.org/source/stretch/{}".format(pkg_name)

        response = requests.get(pkgPageUrl)
        if response.status_code != 200:
            return None
        dom = H.fromstring(response.text)
        # Virtual packages have no normal page, so this lookup can be empty.
        uls = dom.cssselect('div#pmoreinfo')
        if not uls:
            return None
        ul = uls[0].getchildren()
        for k, ele in enumerate(ul):
            if ele.text == "External Resources:":
                # Guard against a trailing header or a section with no links.
                if k + 1 >= len(ul):
                    return None
                links = ul[k + 1].cssselect("li a")
                if not links:
                    return None
                return links[0].attrib["href"]
        return None

    def crawlGitUrl(self, pkg):
        """Crawl *pkg*'s homepage and return a list of same-host URLs.

        Returns None when the package has no homepage or it was already
        crawled; otherwise a list whose first element is the homepage
        itself.  The crawl follows same-host links only, is capped at
        depth 5, and is aborted after 5 minutes via SIGALRM.
        """
        start_url = self.getHomePage(pkg)
        if not start_url:
            return None
        logger.debug("Package homepage: {}".format(start_url))

        if start_url in self.crawled:
            logger.info("{} already crawled".format(start_url))
            return None
        self.crawled.add(start_url)
        urls = [start_url]

        logger.debug("start_url: {}".format(start_url))
        parsed_url = urlparse(start_url)
        scheme = parsed_url.scheme
        tld = parsed_url.netloc
        base_url = "{}://{}".format(scheme, tld)
        # Skip github.com itself (the homepage URL is enough), but still
        # crawl project pages hosted under the github.io domain.
        if tld == "github.com":
            return urls

        class MyCrawler(Crawler):
            # Extract every <a href> from each fetched page.  Anchors are
            # dropped, relative links are resolved against the site root,
            # and everything is accumulated into the enclosing ``urls``.
            def process_document(self, doc):
                if doc.status != 200:
                    return
                dom = H.fromstring(doc.text)
                extracted_links = []
                for link in dom.cssselect("a"):
                    href = link.attrib.get('href')
                    if not href or href.startswith("#"):
                        continue  # missing target or in-page anchor
                    if not href.startswith("http"):
                        href = urljoin(base_url, href)
                    extracted_links.append(href)
                urls.extend(extracted_links)

        def handler(signum, frame):
            # Raised inside crawler.crawl() to enforce the 5-minute budget.
            raise StopIteration("end of time")

        crawler = MyCrawler()
        crawler.set_follow_mode(Crawler.F_SAME_HOST)
        # Raw string: this argument is a regex, not a literal path.
        crawler.add_url_filter(r'\.(jpg|jpeg|gif|png|js|css|swf|tar|gz|bz2|deb|rpm|xz|msi|exe|zip|rar|asc|xml)$')
        crawler.set_max_depth(5)

        try:
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(60 * 5)
            crawler.crawl(start_url)
        except StopIteration:
            return urls
        finally:
            # Cancel any pending alarm so a stale SIGALRM cannot fire
            # later and kill unrelated work in this worker process.
            signal.alarm(0)

        return urls

    def worker(self, queue):
        """Consume package names from *queue* until a sentinel (None)
        arrives, writing each package's crawled URL list to disk.

        Exceptions from a single package are logged and the worker moves
        on to the next one.
        """
        while True:
            pkg = queue.get()
            if pkg is self._SENTINEL:
                break  # clean shutdown requested by start()
            logger.info("Start to crawl {}".format(pkg))

            try:
                related_url = self.crawlGitUrl(pkg)
                logger.debug("Crawler for {} stopped".format(pkg))
                # BUG FIX: 'return' here used to kill the whole worker on
                # the first empty result; skip to the next package instead.
                if not related_url:
                    continue

                with open("output/DebianCrawler/{}".format(pkg), "w") as f:
                    for url in related_url:
                        f.write("{}\n".format(url))

            except Exception as e:
                logger.error(e, exc_info=True)

    def start(self):
        """Load package names, spawn worker processes, and wait for them.

        Currently capped at 15 packages (debugging limit carried over from
        the original code).
        """
        cnt = 0
        queue = Queue()

        with open("input/allpackages.txt") as f:
            for l in f:
                parts = l.split()
                if not parts:
                    continue  # skip blank lines instead of IndexError-ing
                cnt += 1
                pkg = parts[0].strip()
                queue.put(pkg)
                logger.debug("put {} into queue".format(pkg))
                if cnt >= 15:  # debugging cap — TODO lift for full runs
                    break

        # One sentinel per worker so every process sees exactly one and
        # exits; without this, queue.get() blocks forever once the queue
        # drains and the join() loop below never returns.
        for _ in range(self.thread_count):
            queue.put(self._SENTINEL)

        workers = []
        logger.debug("Start spawning worker processes")
        for _ in range(self.thread_count):
            p = multiprocessing.Process(target=self.worker, args=(queue,))
            p.start()
            workers.append(p)
        for p in workers:
            p.join()
