#coding=utf-8


from ripper.core import EncodingConvert
from ripper.core.Exceptions import DownloadException
import uuid
import urllib,sys,os,re,time
from ripper.core.Utils import enable_proxy, disable_proxy #@UnresolvedImport
from ripper.core.Utils import clear_url #@UnresolvedImport
import socket
import urlparse

try:
    import Image # request PIL 1.1.6
except ImportError:
    print 'PIL 1.1.6 required'    
    
    
# Global socket timeout (seconds) so stalled downloads error out instead of
# blocking forever; applies to every connection this module opens.
socket.setdefaulttimeout(35)


__doc__='''

       http下载相关

'''

 

class HttpHandler:

    ''' http下载类 '''

    def __init__(self, baseDir, useProxy=False):
        self.baseDir = baseDir
        self.defaultUseProxy = useProxy
        self.useProxy = useProxy
        self.proxies = {}
        if useProxy == True:
            self.enableProxy()
    
    def enableProxy(self):
        self.useProxy = True
#        for url in Utils.getConfig('httpproxy').split(','):
#                 self.proxies['http'] = url
        enable_proxy()

    def disableProxy(self):
        self.useProxy = False
        self.proxies = {}
        disable_proxy()
        
        
    # 下载html页面
    def getContent(self,burl,needConvert=False):
        burl = clear_url(burl)
        conn = urllib.urlopen(burl,proxies=self.proxies)    
        tstart = time.time()
        content = conn.read()
        tcost = str(time.time() - tstart)
        noticeText = 'I have parsed '+burl+',It costs'+tcost+' seconds'+ (self.useProxy == True and '(throught proxy)' or '')
        encc,content = EncodingConvert.zh2utf8(content)
        print 'page encoding:',encc
        if float(tcost) > 30.0 :
            noticeText = noticeText + ', What the fuck, why takes so long...'
        elif float(tcost) > 100.0 :
            noticeText = noticeText + 'dude, you may consult 10000 :)'
        # 文本格式转换
        if needConvert == True:
            content = content.replace('\r\n','\n')
        return content

    # 通过url下载图片,返回保存在本地的filename
    def getImage(self,imgUrl,preAssignFilename=None, fixed=False):
#        imgUrl = fixurl(imgUrl)
        filename = None
        if None == preAssignFilename :
            filename = getId()
        else:
            filename = preAssignFilename
        try:
            opener = urllib.FancyURLopener(self.proxies)
            imgDir = self.baseDir
            tstart = time.time()
            fn,headers = opener.retrieve(imgUrl)
            tp = str(headers.gettype())
            # 根据header的type判断文件类型并添加扩展名
            if re.match('.*?jp[e]*g.*',tp):
                filename = filename + '.jpg'
            elif re.match('.*?gif.*',tp):
                filename = filename + '.gif'
            elif re.match('.*?bmp.*',tp):
                filename = filename + '.bmp'
            elif re.match('.*?png.*',tp):
                filename = filename + '.bmp'
            elif tp == 'application/octet-stream':
                filename = filename + os.path.basename(fn)
            elif 'image' not in tp:
                # 非图片内容
                if fixed == False:
                    return self.getImage(fixurl(imgUrl),fixed=True)
                else:
                    raise DownloadException(u'not a images:' + imgUrl)
            
            # 保存文件
            absName = os.path.join(imgDir, filename)
            ct = open(fn,'rb').read()
            
            if len(ct) < 10:
                raise DownloadException('image too small')
            
            f = open(absName,'wb')
            f.write(ct)
            f.close()
            tcost = str(time.time() - tstart)[0:5]
            notice = 'Download finished:'+filename+',costs'+tcost+' seconds.'+ (self.useProxy == True and '(throught proxy)' or '')
            fl = -1
            
            # compress img
            try:
                Image.open(os.path.join(imgDir, filename)).save(os.path.join(imgDir, filename))
            except Exception:
                pass
            # get file size
            try:
                fl = os.path.getsize(os.path.join(imgDir, filename))
            except Exception:
                pass
            return filename,fl
        except UnicodeError, err:
            print err
            if fixed == False:
                return self.getImage(fixurl(imgUrl),fixed=True)
            if self.useProxy == True:
                self.disableProxy()
                raise DownloadException(u'Download failed:' + imgUrl)
            else:
                self.enableProxy()
                return self.getImage(imgUrl,filename)
        except Exception, ex:
            print ex
            raise DownloadException(u'Download failed:' + imgUrl)
        finally:
            if self.useProxy == True:
                    self.disableProxy()
                    
def fixurl(url):
    '''Percent-encode a possibly non-ASCII URL so it can be fetched.

    Splits the URL into components, IDNA-encodes the host, UTF-8
    percent-encodes the userinfo, path, query and fragment, then
    reassembles everything into a plain byte string.
    '''
    # work on unicode internally
    if not isinstance(url,unicode):
        url = url.decode('utf8')

    parts = urlparse.urlsplit(url)

    # break netloc down into user:pass@host:port
    userinfo,at_sign,hostinfo = parts.netloc.partition('@')
    username,colon_a,password = userinfo.partition(':')
    hostname,colon_b,portnum = hostinfo.partition(':')

    # quote each path segment on its own -- a segment may contain
    # already-encoded slashes that must not become separators
    segments = []
    for segment in parts.path.split('/'):
        segments.append(urllib.quote(urllib.unquote(segment).encode('utf8'),''))
    path = '/'.join(segments)

    # host goes through IDNA, everything else through UTF-8 (+quote)
    netloc = ''.join((
        urllib.quote(username.encode('utf8')),
        colon_a.encode('utf8'),
        urllib.quote(password.encode('utf8')),
        at_sign.encode('utf8'),
        hostname.encode('idna'),
        colon_b.encode('utf8'),
        portnum.encode('utf8'),
    ))

    query = urllib.quote(urllib.unquote(parts.query).encode('utf8'),'=&?/')
    fragment = urllib.quote(urllib.unquote(parts.fragment).encode('utf8'))

    return urlparse.urlunsplit(
        (parts.scheme.encode('utf8'),netloc,path,query,fragment))

# 生成UUID, 命名图片和文本文件
def getId():
    id0 = str(hex(int(str(time.time()).replace('.',''))))
    id1 = str(uuid.uuid1())
    return id0+'-'+id1

if __name__ == '__main__':
    uri = 'http://4.share.photo.xuite.net/photo.xuite.net314/142894d/5129357/197447104_x.jpg'
    dd = HttpHandler('h:/resource')
    print dd.getImage(uri)
    
