#!/usr/bin/env python
#
# Copyright 2008 CPedia.com.
#
# New portions Copyright 2009, 2010 Rob Stenzinger 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__author__ = 'Ping Chen, Rob Stenzinger'


import os
import re
import datetime
import calendar
import logging
import string
import urllib

import random

from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import urlfetch

import twitter
import feedparser

import yaml

import model
from model import Archive,Weblog,WeblogReactions,Tag,Feeds,Book,Chapter

import simplejson
import cgi
import urllib, hashlib

from akismet import Akismet

# These constants define the allowed values of the "nav_mode" property on the
# nav_context object used by the relative-navigation helpers below.

# Recognized nav modes; kept as a comma-separated literal for easy editing.
user_nav_context_options = "timeline,type,book,book-chapter,tag,search".split(",")
# Fallback mode applied when a handler supplies an unknown nav_mode.
user_nav_context_default = "timeline"

def get_config():
    """Load the comicaster YAML config, with a short-lived memcache layer."""
    cache_key = "comicaster_config_key"
    try:
        config = memcache.get(cache_key)
    except Exception:
        config = None
    if config is not None:
        getLogger(__name__).debug("get comicaster config from cache. ")
        return config
    # Cache miss: read the YAML file that lives next to this module.
    base_dir = os.path.dirname(__file__)
    raw = open(os.path.join(base_dir, "comicaster_config.yaml")).read()
    config = yaml.load(raw)
    # short cache time to allow balance of performance and changing of config
    memcache.add(key=cache_key, value=config, time=60)
    return config


# Module methods to handle incoming data
def get_datetime(time_string):
    """Parse 'YYYY-MM-DD HH:MM:SS'; fall back to the current time when empty."""
    if not time_string:
        return datetime.datetime.now()
    return datetime.datetime.strptime(time_string, '%Y-%m-%d %H:%M:%S')

# get url friendly title:
def slugify(s):
    """Turn a title into a URL-friendly slug (lowercase, dash-separated)."""
    dashed = re.sub(r'\s+', '-', s)           # whitespace runs -> single dash
    cleaned = re.sub(r'[^\w.-]', '', dashed)  # keep only word chars, dots, dashes
    return cleaned.strip('_.- ').lower()

def removepunctuation(text):
    """Strip common punctuation characters (. ? ! , " : ;) from *text*.

    Fix: the parameter was named ``str``, shadowing the builtin; it is
    renamed to ``text`` (positional callers are unaffected).
    """
    punctuation = re.compile(r'[.?!,":;]')
    return punctuation.sub("", text)

def u(s, encoding):
    """Return *s* as unicode, decoding byte strings with *encoding*."""
    # Already-decoded text passes through untouched (Python 2 semantics).
    return s if isinstance(s, unicode) else unicode(s, encoding)



#get recent comments. Cached.
def getRecentReactions():
    """Return the 10 newest published reactions, cached for an hour.

    Bug fix: the original returned the fetch result *before* the
    ``memcache.add`` line, so the cache was never populated and every
    call hit the datastore.
    """
    key_ = "blog_recentReactions_key"
    try:
        recentReactions = memcache.get(key_)
    except Exception:
        recentReactions = None
    if recentReactions is None:
        query = WeblogReactions.all()
        query.filter('status = ', model.config_response_status_default_ready)
        query.order("-date")
        recentReactions = query.fetch(10)
        memcache.add(key=key_, value=recentReactions, time=3600)
    else:
        getLogger(__name__).debug("getRecentReactions from cache. ")

    return recentReactions



def getArchiveBlogNumericMonth(month, year):
    """Map a numeric month (1-12) plus year to getArchiveBlog('MonthName-year').

    Returns None for out-of-range months.
    """
    month_index = int(month)
    if not 1 <= month_index <= 12:
        return None
    month_names = (
        "January", "February", "March", "April", "May", "June",
        "July", "August", "September", "October", "November", "December",
    )
    return getArchiveBlog(month_names[month_index - 1] + "-" + str(year))

#get blogs in some month. Cached.
def getArchiveBlog(monthyear):
    """Return published blogs for a 'Month-Year' string, cached for an hour.

    Bug fix: on a cache miss the original returned the un-fetched Query
    object (``blogs``) while a cache hit returned the fetched *list*; both
    paths now return the fetched list that is also what gets cached.
    """
    monthyear1 = re.sub("-", "_", monthyear)
    key_ = "blog_archive_" + monthyear1 + "_key"
    # The datastore stores the value with a space ("March 2010"); the URL
    # form uses a dash ("March-2010").
    monthyearTmp = re.sub("-", " ", monthyear)
    try:
        blogs_result = memcache.get(key_)
    except Exception:
        blogs_result = None
    if blogs_result is None:
        blogs = Weblog.all()
        blogs.filter('monthyear =', monthyearTmp)
        blogs.filter("status =", model.config_post_status_default_ready)
        blogs_result = blogs.order('-date').fetch(100)
        memcache.add(key=key_, value=blogs_result, time=3600)
    else:
        getLogger(__name__).debug("getMonthBlog from cache. ")
    return blogs_result


#get tag list. Cached.
def getTagList():
    """Return up to 1000 valid tags ordered by name, cached for an hour."""
    cache_key = "blog_tagList_key"
    try:
        tags = memcache.get(cache_key)
    except Exception:
        tags = None
    if tags is not None:
        getLogger(__name__).debug("getTagList from cache. ")
        return tags
    tags = Tag.gql('WHERE valid =:1 ORDER BY tag', True).fetch(1000)
    memcache.add(key=cache_key, value=tags, time=3600)
    return tags


#flush tag list.
def flushTagList():
    """Evict the cached tag list (see getTagList)."""
    cache_key = "blog_tagList_key"
    memcache.delete(cache_key)

#flush MonthYear list.
def flushArchiveList():
    """Evict the cached month/year archive index."""
    cache_key = "blog_monthyear_key"
    memcache.delete(cache_key)

#flush the cached blog list for one Month-Year archive.
def flushArchiveBlog(monthyear):
    """Evict the cached archive entry for a 'Month-Year' string."""
    cache_key = "blog_archive_" + monthyear.replace("-", "_") + "_key"
    memcache.delete(cache_key)

#flush recent comments.
def flushRecentReactions():
    """Evict the cached recent-reactions list (see getRecentReactions)."""
    cache_key = "blog_recentReactions_key"
    memcache.delete(cache_key)

#flush blog pagination.
def flushBlogPagesCache():
    """Evict the cached blog pagination pages."""
    cache_key = "blog_pages_key"
    memcache.delete(cache_key)

#flush month-cached blog.
def flushBlogMonthCache(blog):
    """Evict the year/month cache entry for *blog*, defaulting a missing date."""
    if blog.date is None:
        blog.date = datetime.datetime.now()
    cache_key = "blog_year_month_%s_%s_key" % (blog.date.year, blog.date.month)
    memcache.delete(cache_key)


def getGravatarUrlByUser(user):
    """Return the gravatar URL for *user*, or a stock image when user is None."""
    if user is None:
        return "/img/anonymous.jpg"
    # user.email may be a method (GAE users.User) or a plain attribute.
    try:
        email = user.email()
    except Exception:
        email = user.email
    return getGravatarUrl(email)

def getGravatarUrl(email):
    """Build a gravatar.com avatar URL for *email* (64px, anonymous fallback)."""
    config = get_config()
    fallback = config["root_url"] + "/assets/avatar_anonymous.png"
    params = {
        'gravatar_id': hashlib.md5(str(email)).hexdigest(),
        'default': fallback,
        'size': str(64),
    }
    return "http://www.gravatar.com/avatar.php?" + urllib.urlencode(params)

def getUserNickname(user):
    """Best-effort display name for *user*.

    Tries, in order: nickname, the local part of the email address, and
    finally "anonymous".  Both ``nickname`` and ``email`` may be methods
    (google.appengine.api.users.User) or plain attributes (custom models).

    Bug fixes: the original only handled that method/attribute duality for
    ``email`` — ``if user.nickname:`` was always truthy for a bound method
    and returned the method object itself — and it fell off the end
    (returning None) for a user with neither nickname nor email.
    """
    default = "anonymous"
    if not user:
        return default
    try:
        email = user.email()
    except Exception:
        email = user.email
    try:
        nickname = user.nickname()
    except Exception:
        nickname = user.nickname
    if nickname:
        return nickname
    if email:
        return email.split("@")[0]
    return default

def getLogger(loggerName):
    """Return a DEBUG-level logger for *loggerName* (shared by every model)."""
    log = logging.getLogger(loggerName)
    log.setLevel(logging.DEBUG)
    return log

def getUser():
    """Return the currently signed-in App Engine user (None when logged out)."""
    return users.get_current_user()


    
# only gets "published" blogs
def get_blogs(handler, page, nav_context_alternate=None, results_limit=None):
    if results_limit:
        results_limit = int(results_limit) 
               
    if nav_context_alternate:
        nav_context = nav_context_alternate
    else:
        nav_context = handler.nav_context
        
    key_ = handler.request.path
    
    avoidcache = True #disable cache for blog query results
    
    if not avoidcache:
        try:
            obj_pages = memcache.get(key_)
        except Exception:
            obj_pages = None
    else:
        obj_pages = None
                
    if obj_pages is None or page not in obj_pages:
        blogs = Weblog.all()
        blogs.filter("status =", model.config_post_status_default_ready)
                        
        #todo: more types... video, image, blog, game... etc.
        if nav_context["type"] in model.config_post_types: 
            lookup_type = str(nav_context["type"])
            blogs.filter("entrytype =", lookup_type)
            
        if not str(nav_context["book"]).isspace() and len(nav_context["book"]) > 0:
            blogs.filter("book =", nav_context["book"])
                
        if not str(nav_context["chapter"]).isspace() and len(nav_context["chapter"]) > 0:
            blogs.filter("chapter =", nav_context["chapter"])

        if len(nav_context["permalink"]) > 0:
            blogs.filter("permalink = ", nav_context["permalink"])
                                
        if len(nav_context["tag"]) > 0:
            if len(nav_context["tag"][0]) == 1:
                blogs.filter("tags", nav_context["tag"])
            else:
                for item in nav_context["tag"]:
                    item = str(item)
                    if not item.isspace():
                        blogs.filter("tags", item)

        result_count = blogs.count()            
        if result_count > 1:
            
            # sort
            if len(nav_context["sort"]) > 0:
                blogs.order(nav_context["sort"]) 
            else:
                blogs.order("-date")        
            
            result_object = {"list":None,"single":None}
            obj_pages = {}
            start_position = 0
            total_pages = 0
            
            use_random = False
            use_jump = False
 
            if nav_context["random"] and nav_context["random"] == True:
                start_position = random.randrange(1, result_count, 1)            
                use_random = True
            elif nav_context["jump"] and nav_context["jump"] == True:
                use_jump = True
            
            if use_random == True:
                obj_pages["object_list"] = blogs.fetch(1, start_position)
                return {"list":None,"single":obj_pages["object_list"][0]}
            elif use_jump == True:
                obj_pages["object_list"] = blogs.fetch(1, 0)
                return {"list":None,"single":obj_pages["object_list"][0]}
            else:
                #todo: revisit the forced limit!
                results_limit = handler.config["results_page_post_per_page_limit"]
                #if not results_limit:
                #    results_limit = 

                if page > 0:
                    start_position = (page * results_limit) - results_limit
                                        
                obj_pages["object_list"] = blogs.fetch(results_limit, start_position)
                total_pages = round(result_count/results_limit,0)            

#todo: results limit:
#            if results_limit:
#                obj_pages["object_list"] = blogs.fetch(results_limit, start_position) #fetch(100)
#                total_pages = round(result_count/results_limit,0)
#            else:
#                obj_pages["object_list"] = blogs.fetch(results_limit, start_position) #fetch(100)
#                total_pages = round(result_count/results_limit,0)
            #obj_pages["object_list"] = blogs.fetch(results_limit, start_position)
                
            result_object["list"] = obj_pages
            result_object["total_pages"] = total_pages
            result_object["current_page"] = page
            result_object["results_per_page"] = results_limit
            
            return result_object
                            
        elif result_count == 0:
            return {"list":None,"single":None}
        else:  
            return {"list":None,"single":blogs[0]}
    else:
        pass
#        getLogger(__name__).debug("getBlogPagination from cache. ")


    # already in memcache or freshly added to memcache:
    return {"list":obj_pages[page],"single":None}
    
    
        

"""
get a url based on the current page + user navigation context
"""
def nav_relative_url_list(handler, blog, nav_mode):
    """Resolve relative-nav blog references into their permalink strings.

    Returns a dict with first_url/previous_url/next_url/latest_url set to a
    permalink or None, plus the raw 'blog_references' from nav_relative.
    """
    blog_references = nav_relative(handler, blog, nav_mode)
    result = {}
    for key in ('first_url', 'previous_url', 'next_url', 'latest_url'):
        reference = blog_references[key]
        # nav_relative uses "" for "no neighbor"; map falsy refs to None
        result[key] = reference.permalink if reference else None

    #todo: switch all templates to this new approach:
    result['blog_references'] = blog_references
    return result
    
    
def nav_relative(handler, blog, nav_mode):
    """Locate *blog*'s neighbors within the filtered, date-ordered entry list.

    Returns {'first_url', 'previous_url', 'next_url', 'latest_url'} where
    each value is a Weblog entity (despite the *_url naming) or the empty
    string when no such neighbor exists (e.g. single-result sets).

    NOTE(review): this walks the entire filtered result set to find *blog*'s
    position, so cost grows linearly with the number of published entries.
    """
    # it's assumed that the blog variable is representative of all relative nav context

    first_url = ""
    previous_url = ""
    next_url = ""
    latest_url = ""
    
    # filter the blogs/pages based on current nav context
    if not nav_mode:
        nav_context = get_user_nav_context(handler)
        nav_mode = nav_context["nav_mode"]

    blogs = Weblog.all()
    blogs.filter("status =", model.config_post_status_default_ready)

    #default context is "timeline"
    #todo: filter words...     

    try:
        if nav_mode == "book" or nav_mode == "book-chapter": #in ["book", "book-chapter"]:
            if blog is not None and blog.book is not None:
                blogs.filter("book", blog.book)
                getLogger(__name__).debug("using book filter")
    except:
        #todo: added to deal with old data...
        pass

    try:
        if nav_mode == "chapter" or nav_mode == "book-chapter": #in ["chapter", "book-chapter"]:
            if blog.chapter is not None:
                blogs.filter("chapter", blog.chapter)
                getLogger(__name__).debug("using chapter filter")
    except:
        #todo: added to deal with old data...
        pass
    
    #todo:
    # get tag(s) from filter ... 
    #if nav_context == "tag":
    #    blogs.filter("tag", blog.chapter)
    
    if blog is not None and nav_mode == "type":
        blogs.filter("entrytype", blog.entrytype)
        getLogger(__name__).debug("using type filter")
    elif nav_mode == "timeline":
        blogs.filter("entrytype in", model.config_post_timeline_types)

        
    #search-filter? 
    blogs.order("date")
    currentIndex = 0
    indexCount = 0
   
    # find the current page in the results, determine the relative nav based on that position 
    for item in blogs:
        try:
            if item.permalink == blog.permalink:
                currentIndex = indexCount
        except:
            #todo...,
            pass
        indexCount += 1
        
    # three placements: the current entry is last, first, or somewhere in
    # the middle of the ordered set; a single-result set leaves everything ""
    if currentIndex == indexCount-1 and indexCount > 1:
        previous_url = db.get(blogs[currentIndex-1].key()) #.permalink
        first_url = db.get(blogs[0].key()) #.permalink
        pass 
    elif currentIndex == 0 and indexCount > 1:
        next_url = db.get(blogs[currentIndex+1].key())#.permalink
        latest_url = db.get(blogs[indexCount-1].key()) #.permalink
        pass
    elif currentIndex > 0 and indexCount > 1:
        previous_url = db.get(blogs[currentIndex-1].key()) #.permalink
        next_url = db.get(blogs[currentIndex+1].key()) #.permalink
        first_url = db.get(blogs[0].key()) #.permalink
        latest_url = db.get(blogs[indexCount-1].key()) #.permalink

        
    return {'first_url':first_url,'previous_url':previous_url,'next_url':next_url,'latest_url':latest_url}


def nav_relative_chapter_list(blog):
    """List up to 100 chapters of *blog*'s book in sortrank order, or None."""
    try:
        book = Book.all().filter('title =', blog.book).fetch(1)[0]
        query = Chapter.all()
        query.filter("book", book)
        query.order("sortrank")
        return query.fetch(100)
    except:
        # missing book / malformed blog: callers treat None as "no chapters"
        return None


def nav_relative_episode_list(blog):
    """List up to 100 episodes of *blog*'s book, newest first, or None."""
    try:
        query = Weblog.all()
        query.filter("book", blog.book)
        query.order("-date")
        return query.fetch(100)
    except:
        # malformed blog / query failure: callers treat None as "no episodes"
        return None

"""
Nav Context is intended only for interactions with 
relative-navigation controls.

a first, prev, next, last set of nav buttons for instance.
"""

#todo... review this 
# not all handlers use nav context
# decide re: override of context
def get_user_nav_context(handler):
    """Return handler.nav_context with a validated nav_mode, or None.

    Unknown nav_mode values are reset to the module default; a handler
    without a usable nav_context yields None.
    """
    result = None
    try:
        try:
            mode = str(handler.nav_context["nav_mode"]).lower()
            if mode not in user_nav_context_options:
                handler.nav_context["nav_mode"] = user_nav_context_default
        except:
            # missing/unreadable nav_mode: force the default
            handler.nav_context["nav_mode"] = user_nav_context_default

        result = handler.nav_context
    except:
        # handler has no nav_context at all
        pass

    return result

def get_user_nav_context_options():
    """Expose the module-level list of recognized nav_mode values."""
    return user_nav_context_options

    
def akismet_validate_comment_ham(comment, clientip, userweb, usermail, username):
    """Return True when Akismet classifies *comment* as ham (not spam).

    A missing or invalid API key leaves the verdict unset, so the function
    returns False in that case.
    """
    api = Akismet(agent="Comic Cloud/Alpha 1 | akismet.py/0.2.0")
    # if apikey.txt is in place the key is set automatically,
    # or you can call api.setAPIKey()
    config = get_config()
    data = {
        'comment': comment,
        'blog': config["root_url"],
        'user_ip': clientip,
        'comment_type': 'comment',
        'comment_author': username,
        'comment_author_email': usermail,
        'comment_author_url': userweb,
    }

    verdict = ""
    if api.key is None:
        getLogger(__name__).debug("No 'apikey.txt' file.")
    elif not api.verify_key():
        getLogger(__name__).debug("The API key is invalid.")
    else:
        getLogger(__name__).debug("==> valid api key, trying to validate comment:")
        # data values can all be filled in with defaults from a CGI environment
        verdict = "ham"
        if api.comment_check(comment, data):
            verdict = "spam"
        getLogger(__name__).debug(verdict)

    return verdict == "ham"
    
    
def get_blog_reactions(blog):
    """Fetch up to 1000 published reactions for *blog*, in threaded order."""
    query = WeblogReactions.all()
    query.filter("weblog =", blog)
    query.filter('status = ', model.config_response_status_default_ready)
    query.order("reply_flat_list")
    return query.fetch(1000)
    
def get_feed_items(url):
    key_ = "get_feed_items_" + url
    try:
        items = memcache.get(key_)
    except Exception:
        items = None

    items = None
    try:
        #todo: make configurable
        result_limit = 5
        if items is None:   
            #
            content = urlfetch.fetch(url).content
            feed = feedparser.parse(content)                        

            #getLogger(__name__).debug(feed)
            #getLogger(__name__).debug(feed.entries[0])
            
            if feed.entries:
                results = []
                for item in feed.entries:                 
                    
                    current_text = item.summary
                                                
                    current_item = {
                        'text':current_text,
                        'title':item.title,
                        'link':item.link,
                        'created_at':item.updated_parsed,
                    }                
                    results.append(current_item)
                    if len(results) >= result_limit:
                        break
                              
                memcache.add(key=key_, value=results, time=3600)
        else:
            results = items
    except Exception, e:
        getLogger(__name__).debug("error getting feed items!!!")
        getLogger(__name__).debug(e)
        pass
            
    return results 

       
def get_twitter_user_timeline(user):
    """Return up to 5 linkified recent tweets for *user*, cached for an hour.

    Returns None on any failure (network, auth) — the caller treats that
    as "no tweets to show".
    """
    cache_key = "get_twitter_user_timeline_" + user
    try:
        statuses = memcache.get(cache_key)
    except Exception:
        statuses = None

    results = None
    try:
        #todo: make configurable
        result_limit = 5
        if statuses is not None:
            results = statuses
        else:
            statuses = twitter.Api().GetUserTimeline(user)
            if statuses:
                results = []
                for status in statuses:
                    text = status["text"]

                    # from "cougar":
                    # collapse returns/newlines, then linkify urls and @names
                    text = re.sub(r'[\n\r\s]+', ' ', text)
                    text = re.sub(r'(https?://\S+)', """<a href="\\1">\\1</a>""", text)
                    text = re.sub(r'@(\S+)', """<a href="http://twitter.com/\\1">@\\1</a>""", text)

                    results.append({
                        'text': text,
                        'created_at': status["created_at"],
                    })
                    if len(results) >= result_limit:
                        break

                memcache.add(key=cache_key, value=results, time=3600)
    except:
        # best-effort: swallow failures and fall through with None
        pass

    return results

def archiveList():
    """Return up to 1000 Archive rows ordered by date, or None when empty."""
    entries = Archive.all().order("date").fetch(1000)
    return entries if entries != [] else None

def bookList():
    """Return up to 1000 Book rows ordered by sortrank, or None when empty."""
    entries = Book.all().order("sortrank").fetch(1000)
    return entries if entries != [] else None
    
def entrycount_reset_books():
    """Recount entries per Book, persist, and return 'title: n' summaries."""
    summaries = []
    for book in Book.all():
        count = Weblog.all().filter("book = ", book.title).count()
        book.entrycount = count
        book.put()
        summaries.append(book.title + ": " + str(count))
    return summaries

def entrycount_reset_tags():
    """Recount entries per Tag, persist, and return 'tag: n' summaries."""
    summaries = []
    for tag in Tag.all():
        count = Weblog.all().filter("tags", tag.tag).count()
        tag.entrycount = count
        tag.put()
        summaries.append(tag.tag + ": " + str(count))
    return summaries
        
def entrycount_reset_chapters():
    """Recount entries per Chapter, persist, and return 'title: n' summaries."""
    summaries = []
    for chapter in Chapter.all():
        count = Weblog.all().filter("chapter = ", chapter.title).count()
        chapter.entrycount = count
        chapter.put()
        summaries.append(chapter.title + ": " + str(count))
    return summaries

def recent_comics():
    """Return the 5 newest published comic entries, or None on query failure."""
    try:
        query = Weblog.all()
        query.filter("status =", model.config_post_status_default_ready)
        query.filter("entrytype =", "comic")
        query.order("-date")
        return query.fetch(5)
    except:
        return None
    
    
def chapters_first_and_latest_list(blog):
    """Build per-book chapter summaries with first/latest episode links.

    Cached for an hour.  Returns a list of {title, description, chapters}
    dicts, or None when nothing could be built.

    Bug fixes: nav_relative was called with four arguments (it takes three:
    handler, blog, nav_mode), so every lookup raised TypeError into the
    bare except and every book was discarded; and the empty-reference
    fallbacks were an if/elif, so a single-episode chapter (both refs "")
    only repaired first_url and then crashed on latest_url.permalink.
    """
    #todo: book and chapter preview image
    #todo: episode preview image

    key_ = "chapters_first_and_latest_list"
    try:
        books_result = memcache.get(key_)
    except Exception:
        books_result = None

    if books_result is None:
        books_result = []
        books = Book.all().fetch(100)

        for book in books:
            books_result_current = {}
            books_result_current["title"] = book.title
            books_result_current["description"] = book.description

            chapters = Chapter.all()
            chapters.filter("book", book)
            chapters.order("sortrank")
            book_chapters = chapters.fetch(100)
            books_result_current["chapters"] = []

            for chapter in book_chapters:
                current_chapter = {}

                episodes = Weblog.all()
                episodes.filter("status =", model.config_post_status_default_ready)
                episodes.filter("chapter =", chapter.title)

                #todo: review if there's a better option other than hardcoding "comic"
                episodes.filter("entrytype =", "comic")

                try:
                    episode_for_lookup = episodes.fetch(1)[0]

                    # nav_relative(handler, blog, nav_mode); handler may be
                    # None because "chapter" mode never touches it
                    blog_references = nav_relative(None, episode_for_lookup, "chapter")

                    current_chapter["title"] = chapter.title
                    current_chapter["description"] = chapter.description

                    # single-episode chapters get "" refs from nav_relative;
                    # fall back to the episode itself for both ends
                    if blog_references["first_url"] == "":
                        blog_references["first_url"] = episode_for_lookup
                    if blog_references["latest_url"] == "":
                        blog_references["latest_url"] = episode_for_lookup

                    current_chapter["first_url"] = blog_references["first_url"].permalink
                    current_chapter["first_date"] = blog_references["first_url"].date
                    current_chapter["latest_url"] = blog_references["latest_url"].permalink
                    current_chapter["latest_date"] = blog_references["latest_url"].date

                    books_result_current["chapters"].append(current_chapter)
                except:
                    #todo: a single bad chapter still drops the whole book
                    books_result_current = None

            if books_result_current:
                books_result.append(books_result_current)

        memcache.add(key=key_, value=books_result, time=3600)

    if len(books_result) > 0:
        return books_result
    else:
        return None
        
        

def fetch_asset(filename, template_theme_directory):
    """Load a static asset from the theme (or default) templates folder.

    Returns (file_contents, content_type).

    Bug fix: os.path.splitext keeps the leading dot (".png"), so the
    extension never matched the bare names in the content-type table and
    every asset was served as text/plain; the dot is now stripped.
    """
    dirname = os.path.dirname(__file__)
    template_folder = template_theme_directory + "/assets/"
    extension = str(os.path.splitext(filename)[-1]).lower().lstrip(".")
    template_file = None

    # NOTE(review): find(...) > 0 misses names that *start* with "admin";
    # preserved as-is since admin assets appear to live under deeper paths.
    if str(filename).lower().find("admin") > 0:
        template_file = os.path.join(dirname, os.path.join('templates', filename))
    elif template_folder != "":
        try:
            template_file = os.path.join(dirname, os.path.join(os.path.join('templates', template_folder), filename))
        except:
            pass

    if template_file is None:
        template_file = os.path.join(dirname, os.path.join('templates', filename))

    getLogger(__name__).debug("Using Theme asset at %s", template_file)
    output = open(template_file).read()

    if extension in ["png", "gif", "jpg", "jpeg"]:
        content_header = "image/" + extension
    elif extension == "html":
        content_header = "text/html"
    else:
        # includes "ico", matching the original fallback behavior
        content_header = "text/plain"

    return output, content_header