#-*- encoding: utf-8 -*-
__all__ = ['fetch', 'get_opener', 'get_content_type']
"""
One fetch web page tool with cookie and encode.

History:
2008-05-20 16:00: ADD upload file provide
2008-06-04 17:12: add headers the fetch result
2008-06-06 14:17: add socks proxy provide
2009-04-29 15:58: remove common_refer
"""

import urllib2, cookielib, urllib, mimetypes, socket, httplib
#from socks_proxy_inject import ProxyProcessor
#import socks_proxy as socks

def get_opener():
    """Build a fresh urllib2 opener that keeps cookies across requests."""
    jar = cookielib.CookieJar()
    return urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))

# Shared module-level opener so cookies persist between successive fetch() calls.
common_opener = get_opener()

def open_request(url, data=None, headers=None, opener=None):
    """Open *url* and return the file-like response object.

    data: optional request body (a POST is made when it is not None).
    headers: optional sequence of (name, value) pairs added to the request.
    opener: alternative urllib2 opener; defaults to the module-wide one
        so cookies are shared between calls.
    """
    req = urllib2.Request(url, data)
    # Spoof a common browser UA and advertise gzip support; fetch() below
    # decompresses gzip'ed bodies transparently.
    req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)')
    req.add_header("Accept-Encoding", "gzip")
    for name, value in (headers or []):
        req.add_header(name, value)
    return (opener or common_opener).open(req)

def get_content_type(filename):
    """Guess the MIME type for *filename*, defaulting to a generic binary type."""
    guessed = mimetypes.guess_type(filename)[0]
    return guessed if guessed else 'application/octet-stream'

def encode_multipart_formdata(fields, files):
    """Build a multipart/form-data request body.

    Adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/572202

    fields: sequence of (name, value) pairs for regular form fields.
    files: sequence of (name, filename, value) triples for file uploads.
    Returns (content_type, body) ready for an httplib.HTTP instance.

    NOTE: field values and filenames are passed through urllib.quote_plus,
    which is not what the multipart spec mandates -- kept because the
    receiving server pairs with this encoding.
    """
    boundary = '----------ThIs_Is_tHe_bouNdaRY_$'
    parts = []
    for name, value in fields:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"' % name,
            '',
            urllib.quote_plus(str(value)),
        ])
    for name, filename, payload in files:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, urllib.quote_plus(filename)),
            'Content-Type: %s' % get_content_type(filename),
            '',
            payload,
        ])
    parts.append('--' + boundary + '--')
    parts.append('')
    content_type = 'multipart/form-data; boundary=%s' % boundary
    return content_type, '\r\n'.join(parts)

def fetch(url, encode=False, headers=None, opener=None, fields=None, files=None):
    """Fetch *url* and return a dict with 'data', 'headers', 'url', 'status'.

    encode: when True, decode the body to unicode using the charset from
        the Content-Type header, falling back to the HTML <meta> tag.
    headers: optional list of (name, value) pairs; the caller's list is
        NOT mutated (the original appended Content-Type/Length into it).
    fields/files: form data; when *files* is given the request is sent as
        multipart/form-data, otherwise *fields* is urlencoded.
    opener: alternative urllib2 opener; defaults to the shared one.
    """
    # Copy so the Content-Type/Length appends below never leak into the
    # caller's list (mutable-argument bug in the original).
    headers = list(headers) if headers else []

    data = None
    if files:
        content_type, body = encode_multipart_formdata(fields, files)
        headers.append(('Content-Type', content_type))
        headers.append(('Content-Length', str(len(body))))
        data = body
    elif fields:
        data = urllib.urlencode(fields)

    f = open_request(url, data, headers, opener)
    result = {}
    result['data'] = f.read()

    # Transparently decompress gzip'ed bodies (we send Accept-Encoding: gzip).
    if hasattr(f, 'headers'):
        if f.headers.get('content-encoding', '') == 'gzip':
            import StringIO, gzip
            result['data'] = gzip.GzipFile(fileobj=StringIO.StringIO(result['data'])).read()

    if encode:
        charset = None
        if hasattr(f, 'headers'):
            ct = f.headers.get('content-type')
            if ct:
                ct = ct.lower()
                i = ct.find('charset=')
                if i != -1:
                    # Strip any trailing parameters or quotes after the token.
                    charset = ct[i + len('charset='):].split(';')[0].strip().strip('"\'')
        if charset == 'gb2312':
            charset = 'gb18030'  # superset of gb2312; pages often mislabel
        if charset:
            try:
                result['data'] = result['data'].decode(charset)
            except Exception:
                # Drop the undecodable bytes, then retry once.
                result['data'] = deal_invalid_character(result['data'], charset)
                result['data'] = result['data'].decode(charset)
        else:
            result['data'] = encode_by_html(result['data'])

    if hasattr(f, 'url'):
        result['url'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    # Grab headers BEFORE closing and guard the attribute: the original read
    # f.headers unconditionally after f.close(), which could raise.
    result['headers'] = getattr(f, 'headers', None)
    f.close()
    return result

def encode_by_html(html):
    """Decode *html* using the charset declared in its own <meta> tag.

    Looks for <meta http-equiv="Content-Type" content="...; charset=X">,
    decodes with X (gb2312 is widened to gb18030), stripping undecodable
    bytes on a second attempt. Falls back to us-ascii when no charset is
    declared. Original used mixed tab/space indentation, which is fatal
    under `python -tt` and Python 3 -- normalized to spaces here.
    """
    import re
    meta = re.search(r'''<meta[^<>]+http-equiv[\s="']+Content-Type[^<>]*>''', html, re.I)
    charset = None
    if meta:
        tag = meta.group().lower()
        found = re.search(r'''charset\s*=\s*([^'"\s<>]+)''', tag)
        if found:
            charset = found.group(1)
    if charset == 'gb2312':
        charset = 'gb18030'  # superset of gb2312; pages often mislabel
    if not charset:
        return html.decode('us-ascii')
    try:
        return html.decode(charset)
    except Exception:
        # Strip undecodable bytes and retry once.
        html = deal_invalid_character(html, charset)
        return html.decode(charset)

def deal_invalid_character(html, charset):
    """Remove bytes from *html* that cannot be decoded as *charset*.

    Repeatedly tries to decode and, on failure, cuts out exactly the byte
    span reported by the UnicodeDecodeError, until the remainder decodes
    cleanly. Returns the (possibly shortened) byte string, still encoded.

    Fixes two bugs in the original: (1) `if err_str.find(...)` used the
    str.find() return value as a boolean, so index 0 was treated as "not
    found" and -1 as "found" -- exactly inverted; (2) parsing the error
    message only handled "position N-M" ranges, missing the common
    single-byte "position N" form. UnicodeDecodeError.start/.end give the
    offending span directly and precisely.
    """
    while 1:
        try:
            html.decode(charset)
        except UnicodeDecodeError as err:
            if err.end <= err.start:
                return html  # defensive: never loop without shrinking
            html = html[:err.start] + html[err.end:]
        except Exception:
            # Unknown codec etc. -- give up and return the input unchanged,
            # matching the original's best-effort contract.
            return html
        else:
            return html

def getHeaders(url, data=None, headers=None):
    """Return the Content-Type header of *url*, or None when unavailable.

    data/headers are forwarded to open_request(). Fixes the mutable
    default argument (headers={} -- also the wrong type, open_request
    expects (name, value) pairs) and always closes the response; the
    original only closed it when a headers attribute was present.
    """
    if headers is None:
        headers = []
    f = open_request(url, data, headers)
    try:
        if hasattr(f, 'headers'):
            return f.headers.get('content-type')  # e.g. 'text/html; charset=UTF-8'
        return None
    finally:
        f.close()


if __name__=='__main__':
    fields=[('albumId', '3364')]
    fdata=file("D:\IMG_0022.JPG","rb").read()
    files=[('Filedata','IMG_0022.JPG',fdata)]
    d=fetch('http://2.com:8000/album/uploadPic.do',fields=fields, files=files)
    print d
