#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- mode: python -*-

import re, os, time, urllib, urllib2, sys, logging, zlib

try:
  from email.utils import parsedate_tz, mktime_tz
except:
  #For python2.4
  from email.Utils import parsedate_tz, mktime_tz

import boto, feedparser
from boto.s3.key import Key

from failedrequest import *

AWS_ACCESS_KEY = None
AWS_SECRET_KEY = None
BUCKET_NAME = None
EXPIRED_SECOND = 300
ITEM_EXPIRE_TIME = 300
AFTER_CREATE_TIMEOUT = 2
BEFORE_DELETE_TIMEOUT = 2
MAX_FEED_LENGTH = 20000

#'''
#ZetaPrints creates a User ID for every new request asking for image
#generation. There are some controls in place and users sending excessive
#number of requests can be blocked. If the sender does not supply a user ID
#it will be generated for every new request. There are controls how many new
#IDs can be generated per IP.
#'''
#ZETAPRINTS_USER_ID = None

try:
  from settings import *
except:
  pass

import utils

# True when any mandatory setting is missing; handler() then refuses to serve.
# (The original used bitwise '&' on bools — works, but logical 'and' is the
# idiomatic and short-circuiting form.)
no_settings = not (AWS_ACCESS_KEY and AWS_SECRET_KEY and BUCKET_NAME and EXPIRED_SECOND)

TEMPLATES_LIST = None
FEEDS_LIST = None

try:
  from restrictions import *
except:
  pass

#URI_PATTERN = r'^\/(.+)_(.+)$'
#TWITTER_RSS_PATTERN = r'http://twitter.com/statuses/user_timeline/\d+.rss'

#boto.set_file_logger('boto', os.path.join(os.path.dirname(os.path.realpath(__file__)), 'twictures-boto.log'))

def null_logger_for_boto():
  """Return the 'boto' logger wired to a handler that discards everything.

  boto logs through the logger named 'boto'; giving it a do-nothing handler
  keeps its messages out of the root logger and suppresses the "no handlers
  could be found" warning.
  """
  class _DiscardingHandler(logging.Handler):
    # Swallow every record instead of writing it anywhere.
    def emit(self, record):
      pass

  discard = _DiscardingHandler()
  discard.setLevel(logging.INFO)
  discard.setFormatter(
      logging.Formatter("%(asctime)s %(name)s [%(levelname)s]:%(message)s"))

  boto_logger = logging.getLogger('boto')
  boto_logger.setLevel(logging.INFO)
  boto_logger.addHandler(discard)
  return boto_logger

# Globally silence boto by swapping its module logger for the discard-all one.
boto.log = null_logger_for_boto()

#logging.basicConfig(filename = 'twictures.log', level = logging.DEBUG,)

def getS3Connection():
  """Open an S3 connection with the module-level AWS credentials."""
  connection = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_KEY)
  return connection

def get_simpledb_connection ():
  """Open a SimpleDB connection (debug level 1) with the module credentials."""
  sdb = boto.connect_sdb(AWS_ACCESS_KEY, AWS_SECRET_KEY, debug = 1)
  return sdb

def getImageFromCache (templateId, twitterName):
  """Fetch cached image bytes from S3 under the key '<templateId>_<twitterName>'.

  Returns the raw image data as a string of bytes.
  """
  s3 = getS3Connection()
  bucket = s3.get_bucket(BUCKET_NAME)
  key = Key(bucket)
  key.key = templateId + '_' + twitterName

  # Read the object in chunks.  The original issued two initial read()
  # calls (the first chunk was read twice into separate variables) and
  # grew the result with quadratic '+='; collect chunks and join once.
  chunks = []
  buf = key.read()
  while buf:
    chunks.append(buf)
    buf = key.read()
  key.close()  # original never closed the key / its HTTP connection

  return ''.join(chunks)

def put_image_to_cache (template_id, name, buf):
  """Store generated image bytes on S3 as '<template_id>_<name>'.

  Creates the bucket on first use, tags the object with the detected
  image MIME type and makes it publicly readable.
  """
  s3 = getS3Connection()

  bucket = s3.lookup(BUCKET_NAME)
  if not bucket:
    bucket = s3.create_bucket(BUCKET_NAME)

  key_name = template_id + '_' + name

  key = bucket.get_key(key_name)
  if not key:
    key = bucket.new_key(key_name)

  # Spool the bytes through an anonymous temp file because boto's
  # set_contents_from_file() wants a file object.
  tmp = os.tmpfile()
  try:
    tmp.write(buf)
    tmp.flush()
    tmp.seek(0)  # rewind explicitly so the upload starts at byte 0

    mime = utils.get_image_mime(buf)
    key.content_type = mime
    key.set_contents_from_file(tmp, { 'Content-Type': mime })
    key.set_acl('public-read')
    key.close()
  finally:
    # Original leaked the temp file whenever the upload raised.
    tmp.close()

def get_text_from_feed (feed):
  """Return the display text of the feed's newest entry, or '' if empty.

  Twitter feeds carry the message in the entry description; every other
  feed uses the entry title.
  """
  if not feed.entries:
    return ''

  newest = feed.entries[0]
  link = feed.feed.link
  if link.startswith('http://twitter.com') or link.startswith('https://twitter.com'):
    return newest.description
  return newest.title

def get_feed_cache (sdb, id, template_id):
  """Load cached feed metadata from SimpleDB.

  Returns a dict with 'etag' (string or None) and 'next_access'
  (float timestamp or None); missing domain/item/attributes simply
  leave the defaults in place.
  """
  cache = { 'etag' : None, 'next_access' : None }

  domain = sdb.lookup(id)
  if not domain:
    return cache

  item = domain.get_item(template_id)
  if not item:
    return cache

  try:
    cache['etag'] = item['etag']
  except KeyError:
    pass

  try:
    cache['next_access'] = float(item['next_access'])
  except KeyError:
    pass

  return cache

def is_feed_expired (feed_cache):
  """True when the cached feed may be re-fetched: either no deadline was
  recorded, or the recorded 'next_access' time has passed."""
  deadline = feed_cache['next_access']
  if not deadline:
    return True
  return time.time() >= float(deadline)

def cache_feed (sdb, id, template_id, feed = None):
  """Persist feed polling metadata (next_access deadline, etag) to SimpleDB.

  Without a feed object only the next_access deadline is refreshed; with
  one, the HTTP Expires/ETag response headers drive the stored values.
  """
  domain = sdb.lookup(id)
  if domain:
    item = domain.get_item(template_id) or domain.new_item(template_id)
  else:
    domain = sdb.create_domain(id)
    item = domain.new_item(template_id)

  # Fallback deadline when the server supplies no usable Expires header.
  fallback_deadline = time.time() + EXPIRED_SECOND

  if not feed:
    item['next_access'] = fallback_deadline
  else:
    expires = 0
    if 'expires' in feed.headers:
      expires = mktime_tz(parsedate_tz(feed.headers['expires']))

    # Honour the server-supplied Expires only when it lies in the future.
    if expires >= time.time():
      item['next_access'] = expires
    else:
      item['next_access'] = fallback_deadline

    etag = None
    if 'etag' in feed.headers:
      # Keep only the quoted opaque tag (strips weak-validator prefixes).
      etag = '"%s"' % (feed.headers['etag'].split('"')[1])

    item['etag'] = etag

  item.save()

def set_mark (filename):
  path = os.path.abspath('marks')

  if not os.path.exists(path):
    os.mkdir(path, 0755)

  path = os.path.join(path, filename)

  try:
    f = open(path, 'w')
    f.close
  except:
    pass

def has_mark (filename):
  """Report whether a fresh (not yet expired) mark file exists for filename.

  NOTE(review): freshness is read via os.path.getatime (access time),
  although update_mark() refreshes both times with os.utime — confirm
  atime vs mtime is intentional.
  """
  path = os.path.abspath(os.path.join('marks', filename))

  try:
    stamped = os.path.getatime(path)
  except os.error:
    # No mark file at all.
    return False

  # Fresh while less than EXPIRED_SECOND has elapsed since the stamp.
  return stamped + EXPIRED_SECOND > time.time()

def remove_mark (filename):
  """Delete filename's mark file under ./marks; a missing file is ignored."""
  mark_path = os.path.abspath(os.path.join('marks', filename))
  try:
    os.remove(mark_path)
  except os.error:
    # Already gone (or never created) — nothing to do.
    pass

def update_mark (filename):
  """Refresh a mark file's timestamps to now, creating it if missing."""
  mark_path = os.path.abspath(os.path.join('marks', filename))

  try:
    # None means "set atime and mtime to the current time".
    os.utime(mark_path, None)
  except:
    # File absent (or utime refused): fall back to creating the mark.
    set_mark(filename)

def _parse_dimension (parts, index):
  # One axis of the 'WxH' size segment: None when absent or non-numeric,
  # values below 50 are clamped up to 50.
  try:
    value = int(parts[index])
  except (IndexError, ValueError):
    return None
  if value < 50:
    return 50
  return value

def parse_request (environ):
  """Split a WSGI request into (template_id, size, rss_feed, id).

  PATH_INFO looks like /<template_id>[/<width>x<height>][/] and the raw
  QUERY_STRING is the feed URL.  Returns None when either part is
  missing.  'id' is the feed URL reduced to a cache-key-safe token
  (scheme stripped, '/', '.', '%' flattened to '_').
  """
  path = environ['PATH_INFO'][1 : ]
  if not path:
    return None

  rss_feed = environ['QUERY_STRING']
  if not rss_feed:
    return None

  token = rss_feed[rss_feed.find('//') + 2 : ].replace('/', '_').replace('.', '_')
  key_id = urllib.quote(token).replace('%', '_')
  if not key_id:
    return None

  # Strip a single trailing slash before splitting into segments.
  if path.endswith('/'):
    path = path[ : -1]
  segments = path.split('/')

  template_id = segments[0]

  if len(segments) > 1:
    dims = segments[1].split('x')
    size = ( _parse_dimension(dims, 0), _parse_dimension(dims, 1) )
  else:
    size = None

  return template_id, size, rss_feed, key_id

def check_for_restrictions (template_id, rss_feed):
  """True when both the template and the feed pass the optional allow-lists.

  An unset (falsy) TEMPLATES_LIST or FEEDS_LIST allows everything on
  that axis; otherwise membership is required.
  """
  template_allowed = (not TEMPLATES_LIST) or (template_id in TEMPLATES_LIST)
  feed_allowed = (not FEEDS_LIST) or (rss_feed in FEEDS_LIST)
  return template_allowed and feed_allowed

def can_request_image (sdb, template_id, id, remote_ip, request_id):
  """Try to acquire the single image-generation slot for (template_id, id).

  Uses a SimpleDB domain as a coarse distributed lock: the caller may
  proceed only when no other fresh request item exists, and when no
  earlier-timestamped request shows up after a short settling delay.
  Returns True when the caller won the slot, False otherwise.
  """
  domain = sdb.lookup('image_generation_requests')

  if not domain:
    domain = sdb.create_domain('image_generation_requests')
    items = []
  else:
    items = []
    try:
      items.extend(domain.select("SELECT itemName() FROM `image_generation_requests` WHERE itemName() LIKE '%s_%s%%' AND `timestamp` != '' ORDER BY `timestamp` ASC" % (template_id, id)))
    except:
      # SimpleDB hiccup: treat as "no competing requests recorded".
      pass

  expired_time = time.time() - ITEM_EXPIRE_TIME

  # Purge stale requests.  Bug fix: the original removed elements from
  # `items` while iterating over it, which silently skips the element
  # following each removal; build the surviving list instead.
  fresh_items = []
  for item in items:
    if float(item['timestamp']) < expired_time:
      domain.delete_item(item)
    else:
      fresh_items.append(item)
  items = fresh_items

  if len(items) != 0:
    # Another fresh request already holds the slot.
    return False

  # No live competitor: record our own request, wait for the eventually
  # consistent store to settle, then re-check for anything stamped earlier.
  item_new = domain.new_item('%s_%s_%s_%s' % (template_id, id, remote_ip, request_id))
  timestamp = utils.zero_padded_timestamp_str(time.time())
  item_new['timestamp'] = timestamp
  item_new.save()
  time.sleep(AFTER_CREATE_TIMEOUT)

  items = []
  try:
    items.extend(domain.select("SELECT itemName() FROM `image_generation_requests` WHERE itemName() LIKE '%s_%s%%' AND `timestamp` < '%s' ORDER BY `timestamp` ASC" % (template_id, id, timestamp)))
  except:
    pass

  # Keep only non-expired earlier requests (same mutation-while-iterating
  # bug fixed here with a filtered copy).
  items = [item for item in items if float(item['timestamp']) >= expired_time]

  if len(items) == 0:
    return True

  # Lost the race to an earlier request: withdraw our item.
  domain.delete_item(item_new)
  return False

def remove_request_for_image (sdb, template_id, id, remote_ip, request_id):
  """Drop this request's lock item from the image_generation_requests domain."""
  domain = sdb.lookup('image_generation_requests')
  if not domain:
    return
  domain.delete_attributes('%s_%s_%s_%s' % (template_id, id, remote_ip, request_id))

def handler (environ, start_response):
  """WSGI entry point: serve a templated image generated from a feed.

  Flow: (1) refuse to run without settings; (2) parse
  /<template_id>[/<WxH>]?<feed-url>; (3) short-circuit recently-failed
  requests and fresh mark files; (4) re-fetch the feed only when the
  SimpleDB cache says it expired, honouring ETag / HTTP 304; (5) have
  zetaprints.com render the newest entry's text into the template and
  store the image on S3; (6) redirect the client to the public S3 object.

  NOTE(review): every redirect below uses the status string
  '307 Moved Permanently' — 307 is actually 'Temporary Redirect'
  ('Moved Permanently' is 301's reason phrase).  Clients key on the
  numeric code, so in practice this behaves as a temporary redirect.
  """
  # Refuse to serve until settings.py supplies credentials and bucket.
  if no_settings:
    start_response('200 OK', [('Content-type', 'text/plain')])
    return ['Please tune up your twictures installation.']

  request = parse_request(environ)

  if not request:
    start_response('404 Not found', [('Content-type', 'text/plain')])
    return ['404 - Resource not found']

  template_id, image_size, rss_feed, id  = request

  # A recent failure for this template/feed pair short-circuits to 404
  # so we do not hammer the feed or the renderer.
  if is_failed_request(template_id + '_' + id, EXPIRED_SECOND):
    start_response('404 Not found', [('Content-type', 'text/plain')])
    return ['404 - Resource not found']

  if not check_for_restrictions(template_id, rss_feed):
    start_response('200 OK', [('Content-type', 'text/plain')])
    return ['Template ID or RSS URL are not allowed']

  response_headers = [('Content-type', 'image/jpeg')]

  # A fresh mark file means a recent image already sits on S3: redirect.
  if has_mark(template_id + '_' + id):
    response_headers.append(('location', 'http://%s.s3.amazonaws.com/%s_%s' % (BUCKET_NAME, template_id, id)))
    start_response('307 Moved Permanently', response_headers)
    return ['']

  sdb = get_simpledb_connection()

  feed_cache = get_feed_cache(sdb, id, template_id)

  info = None
  if is_feed_expired(feed_cache):
    # Probe the feed URL ourselves (conditionally, via If-None-Match) so
    # we can inspect the response headers before letting feedparser run.
    # NOTE(review): when no etag is cached this sends the header with a
    # None value — confirm urllib2 tolerates that for the target servers.
    try:
      req = urllib2.Request(rss_feed, headers = { 'If-None-Match' : feed_cache['etag'] })
      f = urllib2.urlopen(req)
      info = f.info()

      # Hand the opened response object to feedparser below.
      rss_feed = f
    except urllib2.HTTPError, e:
      if e.code == 304:
        # Feed unchanged: refresh the cache deadline and redirect to the
        # already-stored S3 image.
        cache_feed(sdb, id, template_id)

        try:
          update_mark(template_id + '_' + id)
        except:
          pass

        response_headers.append(('location', 'http://%s.s3.amazonaws.com/%s_%s' % (BUCKET_NAME, template_id, id)))
        start_response('307 Moved Permanently', response_headers)
        return ['']
      else:
        info = e.info()
    except:
      # Network/URL errors: fall through and let feedparser retry below.
      pass

    if info:
      # Reject anything that is not declared as a feed document.
      if 'Content-Type' in info:
        if not('application/rss+xml' in info['Content-Type']
            or 'application/atom+xml' in info['Content-Type']
            or 'application/rdf+xml' in info['Content-Type']):
          update_failed_request(template_id + '_' + id)
          start_response('404 Not found', [('Content-type', 'text/plain')])
          return ['404 - Wrong feed content type']

      # Reject oversized feeds before parsing them.
      if 'Content-Length' in info:
        if int(info['Content-Length']) > MAX_FEED_LENGTH:
          update_failed_request(template_id + '_' + id)
          start_response('404 Not found', [('Content-type', 'text/plain')])
          return ['404 - RSS feed size exceedes limit ']

    # rss_feed is either the original URL or the opened response object.
    feed = feedparser.parse(rss_feed, etag = feed_cache['etag'])

    if 'status' not in feed:
      start_response('404 Not found', [('Content-type', 'text/plain')])
      return ['404 - RSS feed not found']

    if feed.status == 200:
      # Acquire the per-(template, feed) generation slot so concurrent
      # requests do not render the same image twice.
      request_id = str(utils.generate_request_id(template_id, id, environ['REMOTE_ADDR']))
      if not can_request_image(sdb, template_id, id, environ['REMOTE_ADDR'], request_id):
        if feed_cache['next_access']:
          # Someone else is (or was) generating: serve the cached image.
          response_headers.append(('location', 'http://%s.s3.amazonaws.com/%s_%s' % (BUCKET_NAME, template_id, id)))
          start_response('307 Moved Permanently', response_headers)
          return ['']
        else:
          # Nothing cached yet and slot busy: nothing to serve.
          start_response('404 Not found', [('Content-type', 'text/plain')])
          return ['404 - Resource not found']

      text = get_text_from_feed(feed).encode(feed.encoding)

      # _1 - Message
      params_dict = { 'TemplateID' : template_id, '_1' : text }

      if image_size:
        if image_size[0]:
          params_dict['Width'] = image_size[0]

        if image_size[1]:
          params_dict['Height'] = image_size[1]

      #See Issue 16
      #if ZETAPRINTS_USER_ID:
      #  params_dict['ID']  = ZETAPRINTS_USER_ID

      params = urllib.urlencode(params_dict)

      # Two-step render: the first request returns the preview URL, the
      # second downloads the actual image bytes.
      try:
        f = urllib.urlopen("http://zetaprints.com/?page=template-preview-ecard", params)
        url = f.read()
        f.close()
        f = urllib.urlopen(url)
        image_data = f.read()
        f.close()
      except:
        update_failed_request(template_id + '_' + id)
        start_response('404 Not found', [('Content-type', 'text/plain')])
        return ['404 - Wrong template or ZetaPrints problems']

      cache_feed(sdb, id, template_id, feed)
      put_image_to_cache(template_id, id, image_data)

      try:
        update_mark(template_id + '_' + id)
      except:
        pass

      # Give S3/SimpleDB a moment before releasing the generation slot.
      time.sleep(BEFORE_DELETE_TIMEOUT)
      remove_request_for_image(sdb, template_id, id, environ['REMOTE_ADDR'], request_id)

      response_headers.append(('location', 'http://%s.s3.amazonaws.com/%s_%s' % (BUCKET_NAME, template_id, id)))
      start_response('307 Moved Permanently', response_headers)
      return ['']
    elif feed.status == 304:
      # Feed unchanged per feedparser: just refresh the cache deadline and
      # fall through to the redirect at the bottom.
      cache_feed(sdb, id, template_id)
    else:
      # Any other feed status is passed straight through to the client.
      start_response(feed.headers['status'], [('Content-type', feed.headers['content-type'])])

      if 'subtitle'  in feed.feed:
        return [feed.feed.subtitle.encode(feed.encoding)]

      return ['']

  # Feed not expired (or a 304 fall-through): redirect to the cached image.
  try:
    update_mark(template_id + '_' + id)
  except:
    pass

  response_headers.append(('location', 'http://%s.s3.amazonaws.com/%s_%s' % (BUCKET_NAME, template_id, id)))
  start_response('307 Moved Permanently', response_headers)
  return ['']
