# coding: utf8

@cache(request.env.path_info,time_expire=50,cache_model=cache.disk)
def index():
    """Render the planet front page: every entry, newest first, plus the feed list."""
    import datetime

    entries = db(db.feed.id == db.feed_entry.feed_id).select(
        orderby=~db.feed_entry.date)
    all_feeds = db(db.feed.id > 0).select()

    response.title = PLANET_TITLE
    response.author = PLANET_AUTHOR

    context = dict(rows=entries, feeds=all_feeds,
                   now=datetime.datetime.now())
    return response.render(context)

def refresh():
    """Fetch all configured feeds and save new entries in the database.

    Returns a dict with a human-readable result message, the set of
    entry dates already stored for the last processed feed, and the
    date of the last parsed entry. The latter two are empty/None when
    nothing was processed (previously this raised NameError when there
    were no feeds, or when the last feed had no entries).
    """
    import datetime
    import re
    import gluon.contrib.rss2 as rss2
    import gluon.contrib.feedparser as feedparser

    # filter for full feeds
    regex = re.compile(PLANET_REGEX, re.I)

    feeds = db(db.feed.id > 0).select()
    cnt = 0
    # Pre-initialize so the return statement below is always safe,
    # even when the loops never run (was an unconditional NameError).
    current_entries_date = set()
    date = None
    for feed in feeds:
        d = feedparser.parse(feed.url)
        # dates of entries already stored for this feed, used to skip duplicates
        rows = db(db.feed_entry.feed_id == feed.id).select(db.feed_entry.date)
        current_entries_date = set(row.date for row in rows)
        for entry in d.entries:
            date = datetime.datetime(*entry.date_parsed[:6])
            if date not in current_entries_date and (not feed.general or regex.search(entry.description)):
                db.feed_entry.insert(
                    feed_id=feed.id,
                    title=entry.title,
                    link=entry.link,
                    description=entry.description,
                    # prefer the per-entry author; fall back to the feed's
                    # author when the detail is missing or empty (same
                    # semantics as the old `and/or` expression)
                    author=(entry.author_detail.name
                            if hasattr(entry, 'author_detail')
                               and entry.author_detail.name
                            else feed.author),
                    date=date,
                )
                cnt += 1
    return dict(result="%s entries added" % cnt,
                current_entries_date=current_entries_date, date=date)
    

@cache(request.env.path_info,time_expire=50,cache_model=cache.disk)
def rss():
    """Render the stored feed entries as one aggregated RSS 2.0 document."""
    import datetime
    import gluon.contrib.rss2 as rss2

    rows = db(db.feed.id == db.feed_entry.feed_id).select(
        orderby=~db.feed_entry.date)

    now = datetime.datetime.now()

    feed = rss2.RSS2(
        title=PLANET_TITLE,
        link=URL(r=request, c="default", f="rss"),
        description=PLANET_AUTHOR,
        lastBuildDate=now,
        items=[
            rss2.RSSItem(
                title=row.feed_entry.title,
                link=row.feed_entry.link,
                description=row.feed_entry.description,
                author=row.feed_entry.author,
                # use the entry's own date (was `now`, which stamped every
                # item with the build time on each cache refresh and is
                # inconsistent with planet())
                pubDate=row.feed_entry.date) for row in rows],
    )
    response.headers['Content-Type'] = 'application/rss+xml'
    return rss2.dumps(feed)
            
@cache(request.env.path_info,time_expire=50,cache_model=cache.ram)
def planet():
    """Fetch every feed live and render the merged result as RSS 2.0."""
    import datetime
    import re
    import gluon.contrib.rss2 as rss2
    import gluon.contrib.feedparser as feedparser

    # filter for general (not categorized) feeds; use the shared
    # PLANET_REGEX constant instead of a hard-coded 'postgres' pattern
    # so the filtering stays consistent with refresh()
    regex = re.compile(PLANET_REGEX, re.I)

    feeds = db(db.feed.id > 0).select()

    entries = []
    for feed in feeds:
        # fetch and parse each feed
        d = feedparser.parse(feed.url)
        for entry in d.entries:
            if not feed.general or regex.search(entry.description):
                # extract entry attributes
                entries.append({
                    'feed': {'author': feed.author, 'link': feed.link,
                             'url': feed.url, 'name': feed.name},
                    'title': entry.title,
                    'link': entry.link,
                    'description': entry.description,
                    # prefer the per-entry author; fall back to the feed's
                    # author when the detail is missing or empty
                    'author': (entry.author_detail.name
                               if hasattr(entry, 'author_detail')
                                  and entry.author_detail.name
                               else feed.author),
                    'date': datetime.datetime(*entry.date_parsed[:6]),
                })

    # sort entries by date, descending
    entries.sort(key=lambda e: e['date'], reverse=True)

    now = datetime.datetime.now()

    # aggregate an rss2 feed from the parsed entries, using the shared
    # title/author constants (were the hard-coded strings "Planet web2py"
    # and "planet author", inconsistent with index() and rss())
    feed = rss2.RSS2(
        title=PLANET_TITLE,
        link=URL(r=request, c="default", f="planet"),
        description=PLANET_AUTHOR,
        lastBuildDate=now,
        items=[
            rss2.RSSItem(
                title=e['title'],
                link=e['link'],
                description=e['description'],
                author=e['author'],
                pubDate=e['date']) for e in entries],
    )
    response.headers['Content-Type'] = 'application/rss+xml'
    return rss2.dumps(feed)
   
    


def user():
    """
    Expose the standard web2py auth actions:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    Decorate controller functions that need access control with
        @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    """
    form = auth()
    return dict(form=form)


def download():
    """
    Stream back a previously uploaded file:
    http://..../[app]/default/download/[filename]
    """
    return response.download(request, db)


def call():
    """
    Expose registered services, e.g.:
    http://..../[app]/default/call/jsonrpc
    Decorate the functions to expose with @service.jsonrpc (and friends).
    Supports xml, json, xmlrpc, jsonrpc, amfrpc, rss and csv.
    """
    # drop the session so service calls stay stateless and unlocked
    session.forget()
    return service()