"""
Copyright (C) 2009 Attila-Mihaly Balazs

TwitFeeder is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

TwitFeeder is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU Affero General Public License
along with TwitFeeder.  If not, see <http://www.gnu.org/licenses/>
"""
import time
import re
import cgi
import pickle
import hashlib
import htmllib
import logging
import twitter
import rfc822
from os import environ
from datetime import datetime

MEMCACHE = None

try:
    from google.appengine.api import urlfetch
    from google.appengine.api import images
    from google.appengine.api import memcache
    MEMCACHE = memcache
except ImportError:
    pass

_FETCH_USER_AGENT = "AppEngine-Google; (+http://code.google.com/appengine) (TwitFeeder: x_at_y_or_z@yahoo.com)"
def default_do_fetch(url, headers=None, data=None, timeout=None,
    memcacheKey=None, memcacheTimeout=None):
    """Fetch ``url`` through the App Engine urlfetch service.

    POSTs ``data`` when given, otherwise GETs.  ``headers`` may be a
    mapping or an iterable of (name, value) pairs; they are merged over
    the default User-Agent header.  When both ``memcacheKey`` and
    ``memcacheTimeout`` are supplied, the pickled response is stored in
    memcache under that key.

    Returns the urlfetch result object.  Raises urlfetch.Error for any
    status code other than 200, 301 or 302 (redirects are not followed,
    so 301/302 responses are returned to the caller for inspection).
    """
    logging.debug("Fetching URL: %s", url)
    fetch_headers = {'User-Agent': _FETCH_USER_AGENT}
    if headers:
        # dict.update accepts both mappings and (key, value) pair lists;
        # the original `for k, v in headers` broke on plain dicts because
        # iterating a dict yields keys only
        fetch_headers.update(headers)
    result = urlfetch.fetch(url=url, payload=data,
        method=urlfetch.POST if data else urlfetch.GET,
        headers=fetch_headers, deadline=timeout, follow_redirects=False)
    if result.status_code not in (200, 301, 302):
        raise urlfetch.Error('Failed to fetch url %s' % url)
    if memcacheKey and memcacheTimeout:
        MEMCACHE.add(memcacheKey, pickle.dumps(result), memcacheTimeout)
    return result

# Twitter credentials and the fetch implementation used at runtime.
# The optional `privatedata` module (kept out of the public repository)
# may override both; without it we fall back to anonymous access and the
# plain urlfetch wrapper above.
TWITTER_USER = None
TWITTER_PASS = None
DOFETCH = default_do_fetch

try:
    import privatedata
    TWITTER_USER = privatedata.TWITTER_USER
    TWITTER_PASS = privatedata.TWITTER_PASS
    # hand our User-Agent to the proxy module so proxied requests
    # identify themselves the same way direct ones do
    privatedata._FETCH_USER_AGENT = _FETCH_USER_AGENT
    DOFETCH      = privatedata.proxied_do_fetch
except ImportError:
    pass

def get_base_url():
    """Reconstruct the base URL ("scheme://host[:port]") of the current
    WSGI request from the process environment.

    Prefers HTTP_HOST (which already includes any non-default port);
    otherwise falls back to SERVER_NAME plus SERVER_PORT, omitting the
    port when it is the default for the scheme.  This follows the
    PEP 3333 URL-reconstruction recipe.
    """
    # honour the actual request scheme; the original hard-coded "http://"
    # even for HTTPS requests (it consulted wsgi.url_scheme only to pick
    # the default port)
    scheme = environ.get('wsgi.url_scheme', 'http')
    url = scheme + '://'
    if environ.get('HTTP_HOST'):
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']

        if scheme == 'https':
            if environ['SERVER_PORT'] != '443':
                url += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                url += ':' + environ['SERVER_PORT']
    return url


# from http://wiki.python.org/moin/EscapingHtml
def unescape(s):
    """Decode HTML character entities in *s* (e.g. "&amp;amp;" -> "&amp;")
    and return the resulting plain text.

    Works by feeding *s* through htmllib's parser with a None formatter
    and capturing the emitted character data between save_bgn/save_end.
    NOTE(review): htmllib is Python 2 only (removed in Python 3).
    """
    p = htmllib.HTMLParser(None)
    p.save_bgn()
    p.feed(s)
    return p.save_end()

class StatusFetcher:
    """Fetches statuses from Twitter-API-compatible services (Twitter,
    identi.ca-style sites, wordpress.com) and post-processes them into
    HTML feed entries, caching aggressively in memcache."""

    # a user/tag name: 1-20 word characters, optionally '#'-prefixed
    partial_re_twt_user = r"#?\w{1,20}"
    partial_re_twt_user_urlencoded = r"(?:%23)?\w{1,20}"
    # bare "user" or "@user"
    re_twt_user = re.compile(r"^@?(" + partial_re_twt_user + ")$")
    # "[http(s)://]base.url/[@]user"
    re_at_user  = re.compile(r"^(?:http[s]?://)?(.*)/@?(" + partial_re_twt_user + ")$", re.IGNORECASE)

    re_whitespaces  = re.compile(r"(\s+)")      # capturing group so split() keeps the separators
    re_at_user_repl = re.compile(r"@" + partial_re_twt_user)
    re_hashtag      = re.compile(r"#\w+")
    re_embedded_url = re.compile(r"[a-z]+://\S+")
    re_url_opt_brk  = re.compile(r"(\w)/(?=.)") # slash where an optional line break may be inserted
    re_email        = re.compile(r"\w+.*@.*\w")
    re_html_tags    = re.compile(r"<.*?>")

    # textual smiley -> icon file name (looked up both lower- and upper-cased)
    smileys_map = {
        ':)'  : 'icon_smile.gif',
        ':-)' : 'icon_smile.gif',
        '(^-^)' : 'icon_smile.gif',
        ';)'  : 'icon_wink.gif',
        ';-)' : 'icon_wink.gif',
        '^_-' : 'icon_wink.gif',
        '^_~' : 'icon_wink.gif',
        ':s'  : 'icon_confused.gif',
        ':-s' : 'icon_confused.gif',
        ':((' : 'icon_cry.gif',
        ':\'('  : 'icon_cry.gif',
        ':\'-(' : 'icon_cry.gif',
        'B)'  : 'icon_cool.gif',
        'B-)' : 'icon_cool.gif',
        ':p'  : 'icon_razz.gif',
        ':-p' : 'icon_razz.gif',
        '>:-)' : 'icon_twisted.gif',
        '>:)'  : 'icon_twisted.gif',
        ']:-)' : 'icon_twisted.gif',
        ']:)'  : 'icon_twisted.gif',
        '8-)' : 'icon_eek.gif',
        '8)'  : 'icon_eek.gif',
        ':-D' : 'icon_biggrin.gif',
        ':D'  : 'icon_biggrin.gif',
        'C='  : 'icon_idea.gif',
        ':-o' : 'icon_surprised.gif',
        ':o'  : 'icon_surprised.gif',
        'o_o' : 'icon_surprised.gif',
    }

    # timeouts for memcache entries (seconds); the 1-second values
    # effectively disable caching for debugging
    disable_memcache               = False
    username_to_id_mapping_timeout = 1 if disable_memcache else 24 * 60 * 60
    user_image_timeout             = 1 if disable_memcache else 2 * 24 * 60 * 60
    feed_timeout                   = 1 if disable_memcache else 10 * 60
    url_unshortened_timeout        = 1 if disable_memcache else 7 * 24 * 60 * 60

    @staticmethod
    def _get_api(baseUrl, doTest=True):
        """Build a twitter.Api client for ``baseUrl``.

        Uses authenticated access for twitter.com when credentials are
        configured, the wordpress.com Twitter-compatible endpoint for
        wordpress.com, and ``<baseUrl>/api`` for any other service.
        Returns None when ``doTest`` is requested and the API test call
        fails.
        """
        baseUrl = baseUrl.rstrip('/').lower()
        logging.debug("Getting API for: %s", baseUrl)
        result = None
        if TWITTER_USER and TWITTER_PASS and -1 != baseUrl.find('twitter.com'):
            result = twitter.Api(username=TWITTER_USER, password=TWITTER_PASS,
                cache=TWITTER_CACHE)
        elif -1 != baseUrl.find('wordpress.com'):
            result = twitter.Api(cache=TWITTER_CACHE, base_url='twitter-api.wordpress.com')
            # the wordpress "twitter api" doesn't implement the test method
            doTest = False
        else:
            result = twitter.Api(cache=TWITTER_CACHE, base_url=baseUrl+'/api')
        # route all of python-twitter's HTTP through our urlfetch shim
        result.SetUrllib(_Urllib())
        result.SetUserAgent(_FETCH_USER_AGENT)
        if (not doTest) or result.DoTest(): return result
        logging.error("Test failed for API on %s", baseUrl)
        return None

    @staticmethod
    def find_user_image(userName, baseUrl):
        """Return the user's avatar resized to 48x48 pixels.

        The transformed image bytes are memcached for
        ``user_image_timeout`` seconds.
        """
        key = "userAvatarResized_" + baseUrl + "_" + userName
        avatar = MEMCACHE.get(key)
        if avatar is not None: return avatar

        api = StatusFetcher._get_api(baseUrl)
        user = api.GetUser(userName)
        result = DOFETCH(user.GetProfileImageUrl())
        img = images.Image(image_data=result.content)
        img.resize(48, 48)
        img = img.execute_transforms()

        MEMCACHE.add(key, img, StatusFetcher.user_image_timeout)
        return img

    @staticmethod
    def resolve_url(url, resolveUrl=True):
        """Return an HTML anchor linking to ``url``.

        When ``resolveUrl`` is set and the URL is http(s), it is first
        "unshortened" by fetching it once and taking the Location header
        of a redirect (result memcached).  Zero-width-space entities are
        inserted after slashes so long URLs can wrap in narrow layouts.
        """
        url = unescape(url)
        logging.debug("Unshortening URL: %s", url)
        # NOTE: parenthesized -- the original `resolveUrl and A or B`
        # resolved https URLs even when resolveUrl was False
        if resolveUrl and (url.lower().startswith('http://') or url.lower().startswith('https://')):
            # TODO: maybe multiple requests should be done in parallel
            # this doesn't seem possible with the current asynch rpc implementation
            try:
                # we don't care about collisions here, since at worst we mix up some
                # urls (then again, good luck finding collisions in 140 bytes)
                key = "urlUnshortened_" + hashlib.md5("TwitFeeder_" + url).hexdigest()
                cached_url = MEMCACHE.get(key)
                if cached_url:
                    logging.debug("URL %s unshortened from cache: %s", url, cached_url)
                    url = cached_url
                else:
                    result = DOFETCH(url.encode("utf-8"))
                    if 'Location' in result.headers:
                        logging.debug("URL %s unshortened from request: %s", url, result.headers['Location'])
                        url = result.headers['Location']
                    MEMCACHE.add(key, url, StatusFetcher.url_unshortened_timeout)
            except urlfetch.Error:
                # logging is not needed here, since many urlfetches fail and they clog up the log
                pass
        # put optional breakpoints in the URL
        brk_url = StatusFetcher.re_url_opt_brk.sub(lambda m: cgi.escape(m.group(1)) + "/&#8203;", url)
        return "<a href='" + cgi.escape(url) + "'>" + brk_url + "</a>"

    @staticmethod
    def fetch_feed_for_user(userName, baseUrl):
        """Return the raw status objects of ``userName``'s timeline."""
        api = StatusFetcher._get_api(baseUrl)
        statuses = api.GetUserTimeline(id=userName)
        return statuses

    @staticmethod
    def fetch_feed_for_hashtag(hashtag, baseUrl):
        """Return the raw status objects matching ``hashtag`` via search."""
        api = StatusFetcher._get_api(baseUrl)
        statuses = api.GetSearchResults(hashtag)
        return statuses

    @staticmethod
    def _spiceup_text(text, baseUrl, resolveUrl=True):
        """Convert a plain status ``text`` into HTML.

        Linkifies embedded URLs, @user references, #hashtags (target
        depends on the service) and e-mail addresses, and replaces
        smileys with icon images.  The '{{ base_url }}' and
        '{{ root_url }}' placeholders are substituted later by the
        caller (see fetch_feed).
        """
        result = ''

        # decide where hashtag links point; renamed from `type` to avoid
        # shadowing the builtin
        service = 'identica'
        if -1 != baseUrl.lower().find('twitter.com'): service = 'twitter'
        if -1 != baseUrl.lower().find('wordpress.com'): service = 'wordpress'

        # the capturing split keeps whitespace runs, so simply
        # concatenating the transformed parts preserves spacing
        for part in StatusFetcher.re_whitespaces.split(text):
            if StatusFetcher.re_embedded_url.match(part):
                result += StatusFetcher.resolve_url(part, resolveUrl)
            elif StatusFetcher.re_at_user_repl.match(part):
                result += "&#8203;@<a href='{{ base_url }}/%s'>%s</a>" \
                    % (part[1:], part[1:])
            elif StatusFetcher.re_hashtag.match(part) and 'twitter' == service:
                result += "&#8203;#<a href='{{ base_url }}/search?q=%s'>%s</a>" \
                    % (part[1:], part[1:])
            elif StatusFetcher.re_hashtag.match(part) and 'identica' == service:
                result += "&#8203;#<a href='{{ base_url }}/tag/%s'>%s</a>" \
                    % (part[1:], part[1:])
            elif part.lower() in StatusFetcher.smileys_map:
                result += '<img width="15" height="15" src="{{ root_url }}/static/smileys/%s" alt="%s">' \
                    % (StatusFetcher.smileys_map[part.lower()], part)
            elif part.upper() in StatusFetcher.smileys_map:
                result += '<img width="15" height="15" src="{{ root_url }}/static/smileys/%s" alt="%s">' \
                    % (StatusFetcher.smileys_map[part.upper()], part)
            elif StatusFetcher.re_email.match(part):
                result += '<a href="mailto:%s">%s</a>' % (part, part)
            else:
                result += part
        return result

    @staticmethod
    def fetch_feed(user_or_hashtag, baseUrl):
        """Return the processed feed for a user name or a '#hashtag'.

        The result is a dict with 'statuses' (post-processed status
        objects) and 'is_hashtagfeed'; it is memcached (pickled) for
        ``feed_timeout`` seconds.
        """
        key = "processedFeed_" + baseUrl + "_" + user_or_hashtag
        pickled_feed = MEMCACHE.get(key)
        if pickled_feed:
            feed = pickle.loads(str(pickled_feed))
            logging.debug("Retrieved statuses from memcache for %s / %s", user_or_hashtag, baseUrl)
            logging.debug("Status count: %d", len(feed['statuses']))
            return feed
        feed = {}
        if user_or_hashtag.startswith('#'):
            feed['statuses'] = StatusFetcher.fetch_feed_for_hashtag(user_or_hashtag, baseUrl)
            feed['is_hashtagfeed'] = True
        else:
            feed['statuses'] = StatusFetcher.fetch_feed_for_user(user_or_hashtag, baseUrl)
            feed['is_hashtagfeed'] = False
        # apply postprocessing to each status: strip markup for the
        # title, parse the RFC822 timestamp, linkify the text and build
        # an OpenStreetMap link for geotagged posts
        for entry in feed['statuses']:
            entry.title = StatusFetcher.re_html_tags.sub('', entry.text)
            entry.created_at_date = datetime.fromtimestamp(time.mktime(rfc822.parsedate(entry.created_at)))
            text = StatusFetcher._spiceup_text(entry.text, baseUrl)
            text = text.replace('{{ base_url }}', 'http://' + baseUrl).replace('{{ root_url }}', get_base_url())
            entry.text = text
            entry.geolink = None
            if entry.geo and 'Point' == entry.geo['type']:
                entry.geolink = 'http://www.openstreetmap.org/?mlat=%.4f&mlon=%.4f&zoom=15' \
                    % (entry.geo['coordinates'][0], entry.geo['coordinates'][1])
        MEMCACHE.add(key, pickle.dumps(feed), StatusFetcher.feed_timeout)
        logging.debug("Fetched statuses for %s / %s", user_or_hashtag, baseUrl)
        logging.debug("Status count: %d", len(feed['statuses']))
        return feed

    @staticmethod
    def extract_url_parts(path):
        """Split a request ``path`` into (baseUrl, user_or_hashtag).

        Accepts "base.url/user" or "base.url/@user" (optionally with an
        http(s) scheme) as well as a bare "user"/"@user", which defaults
        to twitter.com.  Returns None when the path matches neither
        form.
        """
        username = StatusFetcher.re_at_user.search(path)
        if username:
            baseUrl = username.group(1)
            if "" == baseUrl or -1 != baseUrl.lower().find('twitter.com'):
                baseUrl = 'twitter.com'
            return (baseUrl, username.group(2))
        username = StatusFetcher.re_twt_user.search(path)
        if username:
            return ('twitter.com', username.group(1))
        return None

class _TwitterCache(object):
    """Adapter exposing python-twitter's cache interface on top of App
    Engine memcache.  Entries are namespaced with a common key prefix
    and expire after _TIMEOUT seconds."""

    _KEY_PREFIX = 'twitter_cache_'
    _TIMEOUT    = 600

    def Get(self, key):
        """Return the cached value stored under ``key``, or None."""
        return MEMCACHE.get(self._KEY_PREFIX + key)

    def Set(self, key, data):
        """Store ``data`` under ``key`` for _TIMEOUT seconds."""
        MEMCACHE.add(self._KEY_PREFIX + key, data, self._TIMEOUT)

    def Remove(self, key):
        """Drop ``key`` from the cache."""
        MEMCACHE.delete(self._KEY_PREFIX + key)

    def GetCachedTime(self, key):
        """Cache timestamps are not tracked; always returns None."""
        return None

# the single shared cache instance handed to every twitter.Api we build
TWITTER_CACHE = _TwitterCache()

class _Urllib:
    """Minimal stand-in for the urllib module, duck-typed just far
    enough for python-twitter: all HTTP traffic goes through DOFETCH
    (App Engine urlfetch or the privatedata proxy) instead of urllib.

    The same object plays the roles of module, auth handler and opener,
    so the factory methods simply return self.
    """
    __version__  = '0.1'
    # extra request headers passed to DOFETCH.
    # NOTE(review): kept as a dict here, while urllib's addheaders is
    # conventionally a list of (name, value) pairs -- callers may assign
    # either; default_do_fetch must accept both forms.
    addheaders   = {}
    # body of the last response fetched via open(), replayed by read()
    fetch_result = None
    
    def HTTPBasicAuthHandler(self):
        # auth is handled inside DOFETCH, so the "handler" is this object
        return self
    
    def add_password(self, *args):
        # credentials are ignored; see HTTPBasicAuthHandler
        pass

    def build_opener(self, *args):
        # this object is also the "opener"
        return self

    def open(self, url, data=None, timeout=None):
        # fetch eagerly and remember the body so a later read() can return it
        result = DOFETCH(url=url, data=data,
            headers=self.addheaders, timeout=timeout)
        self.fetch_result = result.content
        return self

    def read(self):
        return self.fetch_result

    def close(self):
        pass

if __name__ == "__main__":
    # this module is meant to be imported by the App Engine request
    # handlers; there is nothing to run standalone
    pass
