#!/usr/bin/env python
#
# agent.py
#
#  Copyright (c) 2006-2010  Yusuke Shinyama <yusuke at cs dot nyu dot edu>
#  
#  Permission is hereby granted, free of charge, to any person
#  obtaining a copy of this software and associated documentation
#  files (the "Software"), to deal in the Software without
#  restriction, including without limitation the rights to use,
#  copy, modify, merge, publish, distribute, sublicense, and/or
#  sell copies of the Software, and to permit persons to whom the
#  Software is furnished to do so, subject to the following
#  conditions:
#  
#  The above copyright notice and this permission notice shall be
#  included in all copies or substantial portions of the Software.
#  
#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
#  KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
#  WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
#  PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#  COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
#  OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

import sys, re
import os.path
import mimetypes
import cookielib
try:
  from cStringIO import StringIO
except ImportError:
  from StringIO import StringIO


##  Agent
##   todo: cache support 
##
class Agent(object):
  """Simple fetching agent for http: URLs and local files.

  get() returns a tuple (fp, content_type, charset) where fp is a
  file-like object positioned at the start of the body.  For HTTP,
  the agent keeps one persistent connection to the last host
  contacted, sends stored cookies, transparently decodes gzip
  bodies, and follows up to MAX_REDIRECT redirections.
  todo: cache support
  """

  # Default headers sent with every HTTP request.
  USER_AGENT = 'Mozilla/5.0 Agent/0.1'
  HEADERS = {
    'User-Agent': USER_AGENT,
    'Accept-Encoding': 'gzip',
    'Connection': 'keep-alive'
    }
  # Splits a Content-Type header value into (mime type, optional charset).
  CONTENT_TYPE_PAT = re.compile(r'([^\s;]+)(?:.*charset=([^\s;]+))?', re.I)
  # Maximum number of redirections followed before giving up.
  MAX_REDIRECT = 5

  def __init__(self, cookie_file=None, debug=None):
    """Create an agent.

    cookie_file: optional path of a Mozilla-format cookie file to
                 load cookies from (cookies are never saved back).
    debug: optional file-like object for verbose trace output.
    """
    self.debug = debug
    self.cookiejar = cookielib.MozillaCookieJar(cookie_file)
    if cookie_file:
      self.cookiejar.load()
    # Persistent HTTPConnection, created lazily in get_http().
    self.conn = None
    return

  def get(self, url):
    """Dispatch on the URL scheme.

    http: goes over the network; file:// and bare paths are read
    from the local filesystem.  (https: is not supported.)
    """
    if url.startswith('http:'):
      return self.get_http(url)
    elif url.startswith('file://'):
      return self.get_file(url[7:])
    else:
      return self.get_file(url)

  def get_file(self, path):
    """Open a local file, guessing its content type from the name.

    Returns (fp, content_type, charset); either of the last two may
    be None when the type cannot be guessed.
    """
    if self.debug:
      self.debug.write('File: %r\n' % path)
    # open() instead of the deprecated file() builtin.
    fp = open(path, 'rb')
    (content_type, charset) = mimetypes.guess_type(path)
    if self.debug:
      self.debug.write('Content-type: %r, %r\n' % (content_type, charset))
    return (fp, content_type, charset)

  def get_http(self, url):
    """Fetch an http: URL.

    Follows 301/302/303/307 redirects (all requests here are GETs,
    so 303/307 may be followed the same way as 301/302), decodes
    gzip-encoded bodies, and records any cookies received.
    Raises IOError when the redirection limit is exceeded or the
    final status is not 200.
    """
    from httplib import HTTPConnection, BadStatusLine
    from gzipstream import GzipFile
    from urlparse import urlsplit, urljoin
    from urllib import addinfourl
    from urllib2 import Request
    if self.debug:
      self.debug.write('Get: %r\n' % (url))
    # Retry MAX_REDIRECT times.
    for _ in xrange(self.MAX_REDIRECT):
      # forge urllib2.Request object.
      req = Request(url)
      # add cookie headers if necessary.
      if self.cookiejar:
        self.cookiejar.add_cookie_header(req)
      req.headers.update(self.HEADERS)
      # Reuse the persistent connection when the host is unchanged.
      if not self.conn or self.conn.hostport != req.get_host():
        if self.debug:
          self.debug.write('Making connection: %r...\n' % (req.get_host()))
        self.conn = HTTPConnection(req.get_host())
        self.conn.hostport = req.get_host()
      self.conn.request(req.get_method(), req.get_selector(), req.get_data(), req.headers)
      if self.debug:
        self.debug.write('Request-Header: %r\n' % req.headers)
      resp = self.conn.getresponse()
      if self.debug:
        self.debug.write('Response: Status=%r, Headers=%r\n' % (resp.status, resp.getheaders()))
      # Need to read everything first so the connection can be reused.
      fp = StringIO(resp.read())
      # interpret the encoding ('gzip' or 'x-gzip').
      if 'gzip' in resp.getheader('Content-Encoding', '').lower():
        fp = GzipFile(fileobj=fp)
      # get cookie received.
      if self.cookiejar:
        r = addinfourl(fp, resp.msg, url)
        r.code = resp.status
        self.cookiejar.extract_cookies(r, req)
      # check the result code.
      status = resp.status
      if status in (301, 302, 303, 307):
        url = urljoin(url, resp.getheader('Location', ''))
        continue
      break
    else:
      if self.debug:
        self.debug.write('Maximum redirection limit reached: %r\n' % url)
      raise IOError('Maximum redirection limit reached: %r' % url)
    if status != 200:
      raise IOError('Status=%d: Error: %r' % (status, url))
    # got it.
    content_type = resp.getheader('Content-Type', 'text/plain')
    # Guard against a malformed/empty Content-Type header: fall back
    # to the raw header value with no charset rather than crashing.
    m = self.CONTENT_TYPE_PAT.match(content_type)
    if m:
      (content_type, charset) = m.groups()
    else:
      charset = None
    if self.debug:
      self.debug.write('Content-type: %r, %r\n' % (content_type, charset))
    return (fp, content_type, charset)

  
# main
def main(argv):
  agent = Agent(debug=sys.stderr)
  (fp, content_type, charset) = agent.get(argv[1])
  for line in fp:
    print repr(line)
  return 0

# Run as a script: exit with main()'s return value.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
