import urllib2,re,traceback
from StringIO import StringIO

class ErrorHandler(urllib2.HTTPDefaultErrorHandler):
    """Handler that turns HTTP error responses into return values.

    Instead of letting urllib2 raise on non-2xx statuses, the HTTPError
    object is handed back as the "response", tagged with a ``.status``
    attribute so callers can inspect the code uniformly.
    """

    def http_error_default(self, req, fp, code, msg, headers):
        # Build the error object but return it rather than raising it.
        err_resp = urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
        err_resp.status = code
        return err_resp

# Shared handler instance used by every opener built in getPage(); it lets
# non-2xx responses flow back to the caller instead of raising immediately.
err_handler = ErrorHandler()

class BaseCrawler(object):
    """Base class for simple page crawlers.

    Holds the entry link plus source/target names and provides getPage()
    for fetching; subclasses implement the actual logic in docrawl().
    """

    def __init__(self, link, sname, tname):
        self.link = link    # entry URL the crawl starts from
        self.sname = sname  # source name
        self.tname = tname  # target file name used when saving content

    def getPage(self, url):
        """Fetch *url* and return the response body as a string.

        Raises IOError on a 304 (not modified) response or on any other
        non-200 status.  The original code used string exceptions
        (``raise 'msg'``), which are a TypeError at runtime in
        Python >= 2.6; real exception objects are raised instead.
        """
        req = urllib2.Request(url)
        # err_handler makes non-2xx responses come back as objects with
        # a .code attribute instead of raising inside open().
        mgr = urllib2.build_opener(err_handler)
        ns = mgr.open(req)
        try:
            if ns.code == 200:
                return ns.read()
            elif ns.code == 304:
                raise IOError('resource not modified')
            raise IOError('url not exists!')
        finally:
            # Close the response in every path; the original leaked it.
            ns.close()

    def docrawl(self):
        """Hook for subclasses; the base implementation does nothing."""
        pass


    
class Crawler115(BaseCrawler):
    """Crawler for 115.com download pages.

    Resolves the real file URL through two intermediate pages, then
    downloads the payload and saves it to ``self.tname``.
    """

    # Matches any line carrying the 'pickcode' intermediate link.
    pattern1 = r'.*pickcode.*'
    # Captures the first http URL quoted inside the page text.
    pattern2 = r'.*"(http:.*?)".*'

    def __suck1(self, page):
        """Scan *page* line by line for the pickcode link.

        Returns the absolute URL built from the text between the first
        and last double quote of the matching line, or None when no line
        matches.
        """
        bufs = StringIO(page)
        while True:
            line = bufs.readline(1000000)  # cap line length defensively
            if not line:
                return None  # pattern never matched anywhere in the page
            if re.match(Crawler115.pattern1, line):
                # Take the text between the first and the last quote.
                # (The original had an unreachable `break` after this return.)
                return "http://115.com" + line[line.index('"') + 1:line.rindex('"')]

    def __suck2(self, page):
        """Extract the quoted http URL from *page*.

        JSON-style escaped slashes (``\\/``) are turned back into plain
        slashes.  Raises AttributeError if the pattern does not match.
        """
        m = re.match(Crawler115.pattern2, str(page))
        link = m.groups()[0]
        return link.replace(r'\/', '/')

    def save(self, content):
        """Write *content* to self.tname in binary mode."""
        # 'with' guarantees the handle is closed even if write() fails;
        # the original leaked it on a failed write.
        with open(self.tname, 'wb') as fp:
            fp.write(content)

    def docrawl(self):
        """Run the three-step fetch chain and save the final payload.

        Returns True on success, False on any error (the traceback is
        printed for diagnosis).
        """
        try:
            # Decomposed from a single nested one-liner for readability.
            pick_url = self.__suck1(self.getPage(self.link))
            file_url = self.__suck2(self.getPage(pick_url))
            self.save(self.getPage(file_url))
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; all ordinary failures mean "crawl failed".
            traceback.print_exc()
            return False
        return True
    