#coding=utf-8

import urllib.request
import chardet
from html.parser import HTMLParser

class Spider(object):
    """Breadth-first crawler over http(s) URLs or local HTML files.

    Visits at most ``maxvisitedurls`` pages (default 15), collecting
    follow-up links through an UrlParser.  Subclasses override
    :meth:`handle_do` to process each page's HTML text.
    """

    def __init__(self):
        self.__todocollection = list()   # FIFO queue of URLs still to visit
        self.__visitedtable = dict()     # url -> True once visited
        self.__urlparser = UrlParser()
        self.__maxvisitedurls = 15

    def setfiltrules(self, rules):
        """Replace the rules used to filter which hyperlinks are followed."""
        self.__urlparser.setfilterrules(rules)

    def feed(self, root):
        """Start crawling from *root* (an http(s) URL or a local file path)."""
        self.__todocollection.append(root)
        self.__run()

    # Overridable -- handle do your own business
    def handle_do(self, htmlcode):
        pass

    def setmaxvisitedurls(self, maxvisitedurls):
        """Set the upper bound on successfully visited pages."""
        self.__maxvisitedurls = maxvisitedurls

    def getvisitedurls(self):
        """Return a view of every URL visited so far."""
        return self.__visitedtable.keys()

    def __run(self):
        # Breadth-first: pop from the front, new links are appended at the back.
        maxcouter = 0
        while len(self.__todocollection) > 0 and maxcouter < self.__maxvisitedurls:
            if self.__try_deal_with_one_url(self.__todocollection.pop(0)):
                maxcouter += 1

    def __try_deal_with_one_url(self, url):
        # Returns True only when the URL was actually (newly) visited.
        if not self.__visitedtable.get(url):
            self.__parse_page(url)
            self.__visitedtable[url] = True
            self.__todocollection += self.__urlparser.geturls()
            return True
        return False

    def __parse_page(self, url):
        text = self.__get_html_text(url)
        self.handle_do(text)
        self.__urlparser.feed(text)

    def __get_html_text(self, url):
        # 'http://...' / 'https://...' comes from the network; anything else
        # is treated as a local file path.
        filtermanager = FilterManager({'prefix' : ['://', 'http', 'https']})
        if filtermanager.matchprefix('prefix', url):
            return self.__get_html_text_from_net(url)
        else:
            return self.__get_html_text_from_local(url)

    def __get_html_text_from_net(self, url):
        try:
            page = urllib.request.urlopen(url)
        except (OSError, ValueError):
            # URLError/HTTPError are OSError subclasses; ValueError covers
            # malformed URLs.  A bare except here would also hide real bugs.
            print("url request error, please check your network.")
            return str()

        with page:  # close the response even if decoding fails
            text = page.read()
        # Detect the page encoding instead of assuming utf-8; fall back to
        # utf-8 when chardet cannot decide (it may return None).
        encoding = chardet.detect(text)['encoding'] or 'utf-8'
        return text.decode(encoding, 'ignore')

    def __get_html_text_from_local(self, filepath):
        try:
            with open(filepath) as page:  # context manager: no leaked handle
                return page.read()
        except OSError:
            print("no such file, please check your file system.")
            return str()

class UrlParser(HTMLParser):
    """Collects href targets of <a> and <frame> tags whose suffix (extension
    or top-level domain) passes the filter rules."""

    def __init__(self, filtrules=None):
        HTMLParser.__init__(self)
        self.__urls = list()
        if filtrules is None:
            # Build a fresh dict per instance: a mutable default argument
            # would be shared (and mutable) across all parsers.
            filtrules = {'postfix' : ['.', 'html', 'shtml', 'asp', 'php', 'jsp',
                                      'com', 'cn', 'net', 'org', 'edu', 'gov']}
        self.__filtrules = filtrules

    def setfilterrules(self, rules):
        """Replace the filter rules used by :meth:`geturls` collection."""
        self.__filtrules = rules

    def handle_starttag(self, tag, attrs):
        if(tag == 'a' or tag == 'frame'):
            self.__parse_href_attr(attrs)

    def geturls(self):
        """Return the collected URLs, de-duplicated (order not guaranteed)."""
        return list(set(self.__urls))

    def __parse_href_attr(self, attrs):
        for attr in attrs:
            if(attr[0] == 'href' and self.__match_url(attr[1])):
                self.__urls.append(attr[1])

    def __match_url(self, text):
        return FilterManager(self.__filtrules).matchpostfix('postfix', text)

class FilterManager():
    """Rule-driven string matcher.

    A rule is ``key -> [separator, token, token, ...]``.  Prefix/postfix
    matching splits the source on the separator and compares the first/last
    fragment against the tokens; data matching requires all tokens when the
    separator is '&', otherwise any one token suffices.  A missing or empty
    rule means "no filtering": everything matches.
    """

    def __init__(self, rules):
        self.__rules = rules

    def __str__(self):
        return self.__rules.__str__()

    def getrules(self):
        return self.__rules

    def updaterules(self, newrules):
        self.__rules.update(newrules)

    def removerules(self, delkeys):
        for key in delkeys:
            del(self.__rules[key])

    def clearrules(self):
        self.__rules.clear()

    def matchprefix(self, key, source):
        """True if the fragment before the first separator is a listed token."""
        return self.__match(key, source, self.__handle_match_prefix)

    def matchpostfix(self, key, source):
        """True if the fragment after the last separator is a listed token."""
        return self.__match(key, source, self.__handle_match_postfix)

    def matchdata(self, key, source):
        """True if source contains all ('&') / any (otherwise) of the tokens."""
        return self.__match(key, source, self.__handle_match_data)

    def __match(self, key, source, handle_match):
        try:
            if self.__rules.get(key):
                rule = self.__rules[key]
                return handle_match(rule, source)
        except (AttributeError, IndexError, TypeError):
            # Only the failure modes of a malformed rule/source; a bare
            # except here would also swallow KeyboardInterrupt and bugs.
            print('rules format error.')
        return True  # no rule (or a broken one) means everything passes

    def __handle_match_prefix(self, rule, source):
        return source.split(rule[0])[0] in rule[1:]

    def __handle_match_postfix(self, rule, source):
        return source.split(rule[0])[-1] in rule[1:]

    def __handle_match_data(self, rule, source):
        if rule[0] == '&':
            # Conjunctive: every token must appear in the source.
            for word in rule[1:]:
                if not word in source:
                    return False
            return True
        else:
            # Disjunctive: any one token is enough.
            for word in rule[1:]:
                if word in source:
                    return True
            return False

######################################################################

class TitleSpider(Spider):
    """Spider that records the <title> text of every page it visits."""

    def __init__(self):
        super().__init__()
        self.__titleparser = TitleParser()

    def setfiltrules(self, rules):
        # Here the rules filter which title strings are kept,
        # not which URLs are followed.
        self.__titleparser.setfilterrules(rules)

    def handle_do(self, htmlcode):
        self.__titleparser.feed(htmlcode)

    def gettitles(self):
        """Return every title collected so far."""
        return self.__titleparser.gettitles()
                                              
class TitleParser(HTMLParser):
    """Collects the text content of <title> tags that pass the filter rules."""

    def __init__(self, filtrules=None):
        HTMLParser.__init__(self)
        self.__istitle = False   # True while the next data chunk is a title
        self.__titles = list()
        # Fresh dict per instance: a mutable default argument would be
        # shared across all parsers and could be mutated via getrules().
        self.__filtrules = {} if filtrules is None else filtrules

    def setfilterrules(self, rules):
        self.__filtrules = rules

    def handle_starttag(self, tag, attrs):
        if(tag == 'title'):
            self.__istitle = True

    def handle_data(self, data):
        # Only the first data chunk after <title> is considered.
        if self.__istitle and self.__match_data(data):
            self.__titles.append(data)
        self.__istitle = False

    def gettitles(self):
        """Return every title collected so far."""
        return self.__titles

    def __match_data(self, data):
        return FilterManager(self.__filtrules).matchdata('data', data)

######################################################################
class ImgSpider(Spider):
    """Spider that downloads every .jpg image referenced by visited pages."""

    def __init__(self):
        super().__init__()
        self.__imgparser = ImgParser()

    def handle_do(self, htmlcode):
        # Hand each page's HTML to the image parser, which does the downloads.
        self.__imgparser.feed(htmlcode)
  
class ImgParser(HTMLParser):
    """Downloads every <img src="*.jpg"> it sees into the current directory,
    naming the files 0.jpg, 1.jpg, ... in discovery order."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.imgnameindex = 0   # sequential name for the next saved image

    def handle_starttag(self, tag, attrs):
        if(tag == 'img'):
            self.__parse_attrs(attrs)

    def __parse_attrs(self, attrs):
        for attr in attrs:
            self.__parse_one_attr(attr)

    def __parse_one_attr(self, attr):
        filtermanager = FilterManager({'postfix' : ['.', 'jpg']})
        if(attr[0] == 'src' and filtermanager.matchpostfix('postfix', attr[1])):
            self.__download_jpg(attr[1])

    def __download_jpg(self, url):
        # Best-effort: skip images that fail to download or save, but only
        # swallow the errors urlretrieve actually raises (URLError/HTTPError
        # are OSError subclasses; ValueError covers malformed URLs) rather
        # than hiding every exception with a bare except.
        try:
            urllib.request.urlretrieve(url, '%s.jpg' % self.imgnameindex)
            self.imgnameindex += 1
        except (OSError, ValueError):
            pass
                    
if __name__ == '__main__': 
    # Alternative demo: collect page titles instead of images.
    #spider = TitleSpider()
    #spider.feed("http://mil.sohu.com/s2014/jjjs/index.shtml")
    #print(spider.gettitles())
    
    # Demo: crawl the site (up to the default 15 pages), downloading every
    # .jpg referenced, then report which URLs were visited.  Requires
    # network access.
    spider = ImgSpider()
    spider.feed("http://gaoqing.la")
    print(spider.getvisitedurls())
    
    
