#!/usr/bin/env python3
#coding=utf8
#codeby guo_houtan@topsec.com.cn
#time 201603271907

import requests

import queue
import re
from base import BaseUrl,QueueThread

class Crawler(BaseUrl):
    REURL = ("(http[s]?://(?:[-a-zA-Z0-9_]+\.)+[a-zA-Z]+(?::\d+)"
             "?(?:/[-a-zA-Z0-9_%./]+)*\??[-a-zA-Z0-9_&%=.]*)")
    AEURL = (r"href='.*?'",
             r"src='.*?'"
            )
    def __init__(self,url):
        self.urlqueue = queue.Queue(0)
        self.backlist = set()
        self.urlqueue.put(url)
        self.baseurl = BaseUrl(url)
        self.Thread = QueueThread(self.urlqueue,self.geturl)

    def gethtml(self,url):
        req = requests.get(url)
        return req.text

    def geturl(self,url):
        self.backlist.append(url)
        res = self.gethtml(url)
        urls = re.findall(self.REURL,res)
        for url in re.findall(self.AEURL,res):
            if url.startwith("http"):
                urls.append(url)
            elif url.startwith(['/','.']):
                urls.append('%s/%s'%(self.baseurl.domain,url))
        for url in urls:
            url = url.split('?')[0]
            if self.url in url:
                if url not in self.backlist:
                    #print(url)
                    self.urlqueue.put(url)

    def start(self):
        self.Thread.start()


if __name__ == "__main__":
    # Guard the entry point: previously the crawl started as a side effect
    # of merely importing this module.
    crawler = Crawler("http://www.freebuf.com/")
    crawler.start()



