'''
Created on Jan 31, 2012

@author: Forever Together
'''

import mechanize
import gzip
import sys
import cStringIO as StringIO
from lxml import etree
from _logger import Log
import urllib
import traceback

class Http():
    """Thin HTTP helper built on mechanize.

    Fetches pages (optionally passing a cookie along and returning the
    server's Set-Cookie back) and parses responses into lxml trees.
    All methods are static; the class only groups them and owns a logger.
    """

    logger = Log.getLogger('Http')

    @staticmethod
    def getHtmlWithCookie(url, data=None, header=None, cookie=None):
        """Fetch *url* and return ``{'content': body, 'cookie': set_cookie}``.

        url    -- address to load.
        data   -- accepted for interface compatibility; currently unused.
        header -- optional dict of extra request headers.
        cookie -- optional cookie value, sent as a ``Cookie`` header.

        On any failure the error is logged and ``content`` is ''.
        """
        header = {} if header is None else header  # avoid mutable default
        html = ''
        Http.logger.debug("start load url: {0}".format(url))
        hasError = False
        try:
            opener = mechanize.build_opener(mechanize.HTTPDefaultErrorHandler,
                                            mechanize.HTTPRedirectHandler,
                                            mechanize.HTTPRefererProcessor)
            # Provide a default User-Agent only when the caller didn't set one.
            # (The previous hasattr() check looked up an *attribute* on the
            # dict, which never matches a key, so the default UA was never
            # actually sent; the condition was inverted as well.)
            request_header = []
            if not any(k.lower() == 'user-agent' for k in header):
                request_header.append(('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:10.0) Gecko/20100101 Firefox/10.0'))
            for name, value in header.items():
                request_header.append((name, value))
            if cookie is not None:
                request_header.append(("Cookie", str(cookie)))
            opener.addheaders = request_header
            mechanize.install_opener(opener)
            try:
                response = mechanize.urlopen(url)
            except Exception:
                # Best-effort fallback to plain urllib if mechanize chokes.
                traceback.print_exc()
                response = urllib.urlopen(url)
            html = response.read()
            encoding = response.info().getheader('Content-Encoding', '')
            cookie = response.info().getheader('Set-Cookie')
            if encoding == 'gzip':
                Http.logger.debug('Content-Encoding: gzip')
                html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
            # Strip leading non-printable / non-ASCII garbage (e.g. BOM
            # bytes) so the parser starts at real markup.  Guard against an
            # empty body, which previously raised IndexError here.
            while html and (ord(html[0]) > 128 or ord(html[0]) < 32):
                html = html[1:]
        except Exception:
            hasError = True
            Http.logger.info("get html failed with url: {0} reason({1}:{2})".format(url, sys.exc_info()[0], sys.exc_info()[1]))
        if not hasError:
            Http.logger.debug("finished url: {0}".format(url))
        return {'content': html, 'cookie': cookie}

    @staticmethod
    def getHtml(url, data=None, header=None, cookie=None):
        """Fetch *url* and return only the response body (see getHtmlWithCookie)."""
        ret = Http.getHtmlWithCookie(url, data, header, cookie)
        return ret.get('content')

    @staticmethod
    def getXMLTreeWithCookie(url, data=None, header=None, cookie=None, xml_type=False):
        """Fetch *url* and parse it into an lxml tree.

        xml_type -- parse as strict XML when True, as HTML otherwise.

        Returns ``{'content': tree_or_None, 'cookie': set_cookie}``; parse
        failures are logged and yield a None tree.
        """
        tree = None
        try:
            ret = Http.getHtmlWithCookie(url, data, header, cookie)
            html = ret.get('content')
            cookie = ret.get('cookie')
            parser = etree.XMLParser(encoding='utf-8') if xml_type else etree.HTMLParser(encoding='utf-8')
            tree = etree.parse(StringIO.StringIO(html), parser)
        except Exception:
            Http.logger.error("build tree failed with url: {0} reason({1}:{2})".format(url, sys.exc_info()[0], sys.exc_info()[1]))
        # NOTE: previously this return sat inside a ``finally`` block, which
        # would also have suppressed any exception not caught above.
        return {'content': tree, 'cookie': cookie}

    @staticmethod
    def getXMLTree(url, data=None, header=None, cookie=None, xml_type=False):
        """Fetch *url* and return only the parsed tree (see getXMLTreeWithCookie)."""
        ret = Http.getXMLTreeWithCookie(url, data, header, cookie, xml_type)
        return ret.get('content')