#!/usr/bin/env python
# encoding=utf-8
#codeby     道长且阻
#email      ydhcui@suliu.net/QQ664284092
#website    http://www.suliu.net


import time
import re

import gevent
from gevent.threadpool import ThreadPool

from lib.bs4 import BeautifulSoup
from core.log import Logger
from core.base import Request
from core.base import ConnectionError

try:
    import Queue as queue
except ImportError:
    import queue

# Module-level logger shared by the whole crawler; also mirrors output
# to ./log/Crawler.log (the directory must exist — setFileHandler is
# project code, behavior assumed from its name; TODO confirm).
log = Logger()
log.setFileHandler('./log/Crawler.log')

class Crawler(object):
    """Same-site crawler: fetches pages, extracts links and form targets,
    deduplicates them and feeds new requests back into a queue that is
    drained by a gevent thread pool.
    """

    def __init__(self, baseurl, threads=1, timeout=10, sleep=5):
        # Prepend a scheme when the caller passed a bare host name.
        # Fix: the original tested endswith('HTTP') (never true for a URL
        # prefix) and prepended uppercase 'HTTP://', which then never
        # matched real lowercase 'http://...' links inside urljoin().
        if not baseurl.lower().startswith('http'):
            baseurl = 'http://%s' % baseurl
        self.baseurl = baseurl
        self.threads = threads
        self.timeout = timeout      # grace period for gevent.wait at shutdown
        self.sleep   = sleep        # back-off after a connection error
        self.pool    = ThreadPool(self.threads)
        self.Queue   = queue.Queue()
        # Kept as a list: per the original author's note, Request is
        # apparently unhashable, so a set raised an error.
        self.block   = []
        self.flag    = 0            # consecutive idle seconds polling the queue
        self.isstop  = False

    def addreq(self, req):
        """Log and enqueue a request for the worker pool."""
        log.info("GET %s" % req.url)
        self.Queue.put(req)

    def urljoin(self, url):
        """Return an absolute same-site URL, or None for off-site or
        unsupported links (scheme-relative '//...', foreign hosts, empty).
        """
        if url:
            if url.startswith(self.baseurl):
                return url
            elif url.startswith('/') and not url.startswith('//'):
                return '%s%s' % (self.baseurl, url)
            elif url.startswith('.') and './' in url:
                return '%s/%s' % (self.baseurl, url)
        return None

    def isblock(self, req):
        """Deduplicate: enqueue the request only the first time it is seen."""
        if req not in self.block:
            self.block.append(req)
            self.addreq(req)

    def run(self, req):
        """Fetch one request and dispatch the body to the matching parser.

        Fix: restored the exception handling that had been commented out
        (the block ran under a bare `if 1:` debug scaffold), and guarded
        against responses that carry no Content-Type header at all
        (`"html" in None` raised TypeError).
        """
        try:
            response = req.request()
            content_type = response.headers.get('content-type') or ''
            if "html" in content_type:
                self.htmlparse(response.text)
            elif ("text" in content_type
                    or "json" in content_type
                    or "javascript" in content_type):
                self.textparse(response.text)
        except ConnectionError:
            # Transient network failure: back off instead of crashing the worker.
            gevent.sleep(self.sleep)
        except Exception as e:
            log.warn(e)

    def start(self):
        """Crawl from the base URL; stop once the queue has stayed empty
        for five consecutive minutes.

        Fix: the idle counter is now reset whenever work arrives, so the
        crawl stops after 5 *consecutive* idle minutes (as the original
        comment intended) rather than 5 cumulative ones.
        """
        self.run(Request(self.baseurl))
        while self.flag <= 60 * 5:
            log.load('Reload ... Wait for %s' % self.flag)
            try:
                req = self.Queue.get(block=False)
            except queue.Empty:
                gevent.sleep(1)
                self.flag += 1
            else:
                self.flag = 0   # work arrived: restart the idle countdown
                self.pool.spawn(self.run, req)
        gevent.wait(timeout=self.timeout)
        self.isstop = True

    def textparse(self, response):
        """Extract absolute http(s) URLs from text / JSON / JS bodies and
        queue the same-site ones."""
        pattern = (r"(http[s]?://(?:[-a-zA-Z0-9_]+\.)+[a-zA-Z]+(?::\d+)?"
                   r"(?:/[-a-zA-Z0-9_%./]+)*\??[-a-zA-Z0-9_&%=.]*)")
        for raw in re.findall(pattern, response):
            url = self.urljoin(raw)
            if url:
                self.isblock(Request(url))

    def htmlparse(self, response):
        """Extract candidate URLs (and, for forms, default field data)
        from an HTML page and queue the same-site ones."""
        href_tags = {"a", "link", "area", "base"}
        src_tags = {"script", "img", "iframe", "frame", "embed", "source", "track"}
        param_names = {"movie", "href", "link", "src", "url", "uri"}
        # Simple input types whose default value we carry into the request.
        simple_input_types = ("text", "password", "submit", "hidden", "checkbox")
        for tag in BeautifulSoup(response, "html.parser").findAll():
            url = None
            data = {}
            tagname = tag.name.lower()
            if tagname in href_tags:
                url = tag.get("href", None)
            elif tagname in src_tags:
                url = tag.get("src", None)
            elif tagname == "param":
                pname = tag.get("name", "").lower().strip()
                if pname in param_names:
                    url = tag.get("value", None)
            elif tagname == "object":
                url = tag.get("data", None)
            elif tagname == "applet":
                url = tag.get("code", None)
            elif tagname == "meta":
                # <meta http-equiv="refresh" content="N;url=..."> redirect.
                # Fix: the original compared the *name* attribute against
                # "http-equiv", which never occurs in real markup, and did
                # not strip the conventional "url=" prefix.
                equiv = tag.get("http-equiv", "").lower().strip()
                if equiv == "refresh":
                    content = tag.get("content", "")
                    p = content.find(";")
                    if p >= 0:
                        url = content[p + 1:].strip()
                        if url.lower().startswith("url="):
                            url = url[4:]
            elif tagname == "form":
                # Fix: a form's target is its 'action' attribute — the
                # original looked up 'src' (forms have none) and computed
                # but never used 'action', so form URLs were never queued.
                url = tag.get('action', None)
                # One pass over inputs replaces five copy-pasted loops;
                # inputs without a type attribute default to "text".
                for m in tag.findAll('input', {'name': True}):
                    if m.get('type', 'text').lower() in simple_input_types:
                        data[m['name']] = m.get('value', '')
                # Radio buttons: keep only the first value of each group.
                seen_radio = []
                for m in tag.findAll('input', {'name': True, 'type': 'radio'}):
                    if m['name'] not in seen_radio:
                        seen_radio.append(m['name'])
                        data[m['name']] = m.get('value', '')
                for m in tag.findAll('textarea', {'name': True}):
                    # Fix: an empty <textarea> has no contents — avoid IndexError.
                    data[m['name']] = m.contents[0] if m.contents else ''
                for m in tag.findAll('select', {'name': True}):
                    options = m.findAll('option', value=True)
                    if options:
                        data[m['name']] = options[0]['value']
            url = self.urljoin(url)
            if url:
                self.isblock(Request(url, data))


if __name__ == '__main__':
    # Ad-hoc debug driver: build a crawler and fetch a single page directly.
    crawler = Crawler('http://ldc.layabox.com/')
    # crawler.start()  # full crawl entry point, disabled for debugging
    crawler.run(Request('http://www.szweb.cn/'))


