# -*- coding: utf-8 -*-
'''
Created on 2013-5-1

@author: YANGCHENLONG
'''
import urllib,httplib,urlparse
import re
import random
import urllib2
import os
import sys

class ImageFetchTool(object):
    """Small page crawler: probes URLs, fetches HTML, extracts links via
    regexes, resolves relative links and downloads the .jpg images a page
    (and its linked .htm pages) references.

    NOTE(review): legacy Python 2 code (urllib/urllib2/httplib/urlparse);
    method bodies are written in the Py2.6+ subset that also parses on Py3.
    """

    def httpExists(self, url):
        """Return True if a HEAD request for `url` answers 200.

        A 302 answer is followed recursively via its Location header;
        any other status or error yields False (with a diagnostic print).
        """
        host, path = urlparse.urlsplit(url)[1:3]
        if ':' in host:
            # explicit port in the authority part, e.g. "host:8080"
            host, port = host.split(':', 1)
            try:
                port = int(port)
            except ValueError:
                print('invalid port number %r' % (port,))
                return False
        else:
            # no port specified, use the scheme's default
            port = None
        found = False
        connection = None
        try:
            connection = httplib.HTTPConnection(host, port=port)
            connection.request("HEAD", path)
            resp = connection.getresponse()
            if resp.status == 200:       # normal 'found' status
                found = True
            elif resp.status == 302:     # recurse on temporary redirect
                found = self.httpExists(
                    urlparse.urljoin(url, resp.getheader('location', '')))
            else:                        # everything else -> not found
                print("Status %d %s : %s" % (resp.status, resp.reason, url))
        except Exception as e:
            print("%s %s %s" % (e.__class__, e, url))
        finally:
            # fix: the original leaked the connection object
            if connection is not None:
                connection.close()
        return found

    def gGetHtmlLines(self, url):
        """Fetch `url` and return its body as a list of lines.

        Returns None when `url` is None, unreachable, or the read fails
        (best-effort: errors are printed, never raised).
        """
        if url is None:
            return
        if not self.httpExists(url):
            return
        try:
            page = urllib.urlopen(url)
            try:
                return page.readlines()
            finally:
                page.close()   # fix: close even if readlines() raises
        except Exception:
            print("gGetHtmlLines() error!")
            return

    def gGetHtml(self, url):
        """Fetch `url` and return its body as a single string, or None on
        any failure (same best-effort contract as gGetHtmlLines)."""
        if url is None:
            return
        if not self.httpExists(url):
            return
        try:
            page = urllib.urlopen(url)
            try:
                return page.read()
            finally:
                page.close()   # fix: close even if read() raises
        except Exception:
            print("gGetHtml() error!")
            return

    def gGetFileName(self, url):
        """Return the last '/'-separated component of `url`
        (None for None, '' for '')."""
        if url is None:
            return None
        if url == "":
            return ""
        arr = url.split("/")
        return arr[-1]

    def gRandFilename(self, type):
        """Return a random file name '<32 chars>.<type>': 16 pairs of an
        upper-case letter followed by a digit."""
        fname = ''
        for _ in range(16):
            fname = fname + chr(random.randint(65, 90))   # A-Z
            fname = fname + chr(random.randint(48, 57))   # 0-9
        return fname + '.' + type

    def gGetAbslLink(self, url, link):
        """Resolve `link`, as found on page `url`, to an absolute address."""
        if url is None or link is None:
            return
        if url == '' or link == '':
            return url
        if link[0] == '/':
            # site-root relative
            return self.gGetHttpAddr(url) + link
        if link[0:4] == 'http':
            # already absolute
            return link
        if link[0:2] == '..':
            # parent-relative ("../...")
            return self.gGetHttpAddrFatherAssign(url, link)
        # plain relative: resolve against the parent directory of url
        return self.gGetHttpAddrFather(url) + link

    def gGetRegList(self, linesList, regx):
        """Match `regx` (case-insensitive) against every line and return the
        de-duplicated list of captured groups, in first-seen order.

        Fix: the original used re.search and therefore kept only the first
        match per line; finditer now collects every match. Returns None when
        `linesList` is None.
        """
        if linesList is None:
            return
        pattern = re.compile(regx, re.IGNORECASE)  # hoisted out of the loop
        rtnList = []
        for line in linesList:
            for matchs in pattern.finditer(line):
                for foundStr in matchs.groups():
                    if foundStr not in rtnList:
                        rtnList.append(foundStr)
        return rtnList

    def gDownloadWithFilename(self, url, referer, savePath):
        """Download `url` to `savePath`, sending a browser-like User-Agent
        and the given Referer header.

        Creates the target directory tree when missing. IOErrors are
        reported via print, not raised.
        """
        try:
            path, fileName = os.path.split(savePath)
            # makedirs (not mkdir) so nested targets work; skip when
            # savePath has no directory part at all
            if path and not os.path.exists(path):
                os.makedirs(path)
            i_headers = {"User-Agent": "Mozilla/5.0(Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1)\
                    Gecko/20090624 Firefox/3.5",
                    "Referer": referer}
            req = urllib2.Request(url, data=None, headers=i_headers)
            # fix: the original built `req` but fetched the bare url,
            # silently dropping the User-Agent/Referer headers
            res = urllib2.urlopen(req, timeout=60)
            try:
                data = res.read()
            finally:
                res.close()
            # renamed from `file`, which shadowed the builtin
            out = open(savePath, 'w+b')
            try:
                out.write(data)
            finally:
                out.close()
        except IOError as e:
            print("download error!" + url)
            print(e)

    def gDownload(self, url, referer, savePath):
        """Download `url` to `savePath` (thin wrapper kept for callers)."""
        self.gDownloadWithFilename(url, referer, savePath)

    def gDownloadHtmlJpg(self, downloadUrl, savePath):
        """Download every .jpg referenced by a src= attribute on the page at
        `downloadUrl` into `savePath`."""
        lines = self.gGetHtmlLines(downloadUrl)
        regx = r"""src\s*="?(\S+)\.jpg"""
        lists = self.gGetRegList(lines, regx)
        if lists is None:
            return
        for jpg in lists:
            jpg = self.gGetAbslLink(downloadUrl, jpg) + '.jpg'
            # fix: gDownload takes (url, referer, savePath); the original
            # omitted the referer and raised TypeError on every call.
            # The page being scraped is the natural Referer value.
            self.gDownload(jpg, downloadUrl, savePath)

    def gGetHttpAddr(self, url):
        """Return the site address scheme://host of `url` ('' for '')."""
        if url == '':
            return ''
        arr = url.split("/")
        return arr[0] + "//" + arr[2]

    def gGetHttpAddrFather(self, url):
        """Return the parent directory of `url`, with a trailing '/'."""
        if url == '':
            return ''
        arr = url.split("/")
        addr = arr[0] + '//' + arr[2] + '/'
        # re-append every intermediate path segment, dropping the last
        # component (the page/file name)
        for seg in arr[3:-1]:
            addr = addr + seg + '/'
        return addr

    def gGetHttpAddrFatherAssign(self, url, link):
        """Resolve a '..'-style relative `link` against `url` and return the
        absolute address ('' when either input is '')."""
        if url == '':
            return ''
        if link == '':
            return ''
        linkArray = link.split("/")
        urlArray = url.split("/")
        partLink = ''
        partUrl = ''
        # fix: was unbound (NameError) when link contains no '..'
        numOfFather = 0
        for i in range(len(linkArray)):
            if linkArray[i] == '..':
                numOfFather = i + 1    # number of parent levels to strip
            else:
                partLink = partLink + '/' + linkArray[i]
        # keep the url segments that remain after stripping the last
        # component plus numOfFather parent levels
        for i in range(len(urlArray) - 1 - numOfFather):
            partUrl = partUrl + urlArray[i]
            if i < len(urlArray) - 1 - numOfFather - 1:
                partUrl = partUrl + '/'
        return partUrl + partLink

    def gGetHtmlLink(self, url):
        """Return the de-duplicated absolute .htm/.html links referenced by
        href= attributes on the page at `url` (each printed once)."""
        rtnList = []
        lines = self.gGetHtmlLines(url)
        regx = r"""href="?(\S+)\.htm"""
        matches = self.gGetRegList(lines, regx)
        if matches is None:
            # fix: the original iterated None (TypeError) when the fetch failed
            return rtnList
        for link in matches:
            link = self.gGetAbslLink(url, link) + '.htm'
            if link not in rtnList:
                rtnList.append(link)
                print(link)
        return rtnList

    def gDownloadAllJpg(self, url, savePath):
        """Download the jpgs on the page at `url`, then the jpgs on every
        .htm page it links to, all into `savePath`."""
        self.gDownloadHtmlJpg(url, savePath)
        for link in self.gGetHtmlLink(url):
            self.gDownloadHtmlJpg(link, savePath)